^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2009, Microsoft Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Haiyang Zhang <haiyangz@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Hank Janssen <hjanssen@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/hyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/mshyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "hyperv_vmbus.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static void init_vp_index(struct vmbus_channel *channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/*
 * Table mapping an offer's interface-type GUID to a device type.
 * Entries are indexed by device type: hv_get_dev_type() iterates the
 * indices HV_IDE .. HV_UNKNOWN and compares each entry's GUID, so the
 * order here must match those enum values, and the GUID-less HV_UNKNOWN
 * catch-all must remain last.  .perf_device flags the classes treated
 * as performance-critical (cf. hv_is_perf_channel() usage in
 * hv_process_channel_removal()).
 */
const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = false,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	},

	/* Synthetic MOUSE */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
/*
 * Offers whose interface-type GUID appears in this table are treated as
 * unsupported: hv_get_dev_type() maps them to HV_UNKNOWN via
 * is_unsupported_vmbus_devs().
 */
static const struct {
	guid_t guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * The rescinded channel may be blocked waiting for a response from the host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * take care of that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) channel->rescind = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (msginfo->waiting_channel == channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) complete(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static bool is_unsupported_vmbus_devs(const guid_t *guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static u16 hv_get_dev_type(const struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) const guid_t *guid = &channel->offermsg.offer.if_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return HV_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) for (i = HV_IDE; i < HV_UNKNOWN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (guid_equal(guid, &vmbus_devs[i].guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) pr_info("Unknown GUID: %pUl\n", guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
/**
 * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The size of @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The size of @srv_version.
 * @nego_fw_version: The selected framework version.
 * @nego_srv_version: The selected service version.
 *
 * Note: Versions are given in decreasing order.
 *
 * Set up and fill in default negotiate response message.
 * Mainly used by Hyper-V drivers.
 *
 * Return: true if both a framework and a service version were matched
 * against the host's proposal in @buf (the response in @buf then
 * advertises one of each); false otherwise (the response advertises
 * zero versions).
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	/* The negotiate payload follows the pipe and icmsg headers in @buf. */
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	/*
	 * Seed the selected versions with the host's counts; they are
	 * overwritten with the matched major/minor below when a match is
	 * found.
	 */
	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 *
	 * NOTE(review): negop->icframe_vercnt and negop->icmsg_vercnt come
	 * from the host and are used as loop/index bounds into
	 * negop->icversion_data[] without being checked against the size of
	 * @buf — confirm callers guarantee a large enough buffer.
	 */

	for (i = 0; i < fw_vercnt; i++) {
		/* fw_version[] packs major in the high 16 bits, minor low. */
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		/*
		 * The service versions proposed by the host follow the
		 * framework versions in icversion_data[].
		 */
		for (j = negop->icframe_vercnt;
			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
			j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
				(negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	/* Both the success and the failure path fill in the response. */
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}

EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * alloc_channel - Allocate and initialize a vmbus channel object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static struct vmbus_channel *alloc_channel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct vmbus_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (!channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) spin_lock_init(&channel->sched_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) init_completion(&channel->rescind_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) INIT_LIST_HEAD(&channel->sc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) tasklet_init(&channel->callback_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) vmbus_on_event, (unsigned long)channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) hv_ringbuffer_pre_init(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	/* Stop event delivery before tearing the channel down. */
	tasklet_kill(&channel->callback_event);
	vmbus_remove_channel_attr_group(channel);

	/*
	 * Drop our reference; the channel memory is presumably freed by the
	 * kobject release callback once the last reference is gone — TODO
	 * confirm against the kobj_type used for channels.
	 */
	kobject_put(&channel->kobj);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
/*
 * vmbus_channel_map_relid - Publish @channel in the relid -> channel
 * array that vmbus_chan_sched() uses to look up channels; out-of-range
 * relids are rejected with a warning.
 */
void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	/*
	 * The mapping of the channel's relid is visible from the CPUs that
	 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
	 * execute:
	 *
	 * (a) In the "normal (i.e., not resuming from hibernation)" path,
	 *     the full barrier in smp_store_mb() guarantees that the store
	 *     is propagated to all CPUs before the add_channel_work work
	 *     is queued.  In turn, add_channel_work is queued before the
	 *     channel's ring buffer is allocated/initialized and the
	 *     OPENCHANNEL message for the channel is sent in vmbus_open().
	 *     Hyper-V won't start sending the interrupts for the channel
	 *     before the OPENCHANNEL message is acked.  The memory barrier
	 *     in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
	 *     that vmbus_chan_sched() must find the channel's relid in
	 *     recv_int_page before retrieving the channel pointer from the
	 *     array of channels.
	 *
	 * (b) In the "resuming from hibernation" path, the smp_store_mb()
	 *     guarantees that the store is propagated to all CPUs before
	 *     the VMBus connection is marked as ready for the resume event
	 *     (cf. check_ready_for_resume_event()).  The interrupt handler
	 *     of the VMBus driver and vmbus_chan_sched() can not run before
	 *     vmbus_bus_resume() has completed execution (cf. resume_noirq).
	 */
	smp_store_mb(
		vmbus_connection.channels[channel->offermsg.child_relid],
		channel);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
/*
 * vmbus_channel_unmap_relid - Remove @channel from the relid -> channel
 * array (the inverse of vmbus_channel_map_relid()); out-of-range relids
 * are rejected with a warning.
 */
void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	WRITE_ONCE(
		vmbus_connection.channels[channel->offermsg.child_relid],
		NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) static void vmbus_release_relid(u32 relid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) struct vmbus_channel_relid_released msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) msg.child_relid = relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) trace_vmbus_release_relid(&msg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
/*
 * hv_process_channel_removal - Tear down a rescinded channel: unmap its
 * relid, unlink it from the channel lists, release the relid back to the
 * host, and free the channel object.  Must be called with the channel
 * mutex held and only for channels already marked rescinded.
 */
void hv_process_channel_removal(struct vmbus_channel *channel)
{
	lockdep_assert_held(&vmbus_connection.channel_mutex);
	BUG_ON(!channel->rescind);

	/*
	 * hv_process_channel_removal() could find INVALID_RELID only for
	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
	 */
	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));

	/*
	 * Upon suspend, an in-use hv_sock channel is removed from the array of
	 * channels and the relid is invalidated.  After hibernation, when the
	 * user-space application destroys the channel, it's unnecessary and
	 * unsafe to remove the channel from the array of channels.  See also
	 * the inline comments before the call of vmbus_release_relid() below.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_channel_unmap_relid(channel);

	/* Primary channels live on chn_list; sub-channels on sc_list. */
	if (channel->primary_channel == NULL)
		list_del(&channel->listentry);
	else
		list_del(&channel->sc_list);

	/*
	 * If this is a "perf" channel, updates the hv_numa_map[] masks so that
	 * init_vp_index() can (re-)use the CPU.
	 */
	if (hv_is_perf_channel(channel))
		hv_clear_alloced_cpu(channel->target_cpu);

	/*
	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
	 * the relid is invalidated; after hibernation, when the user-space app
	 * destroys the channel, the relid is INVALID_RELID, and in this case
	 * it's unnecessary and unsafe to release the old relid, since the same
	 * relid can refer to a completely different channel now.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_release_relid(channel->offermsg.child_relid);

	free_channel(channel);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) void vmbus_free_channels(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct vmbus_channel *channel, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) listentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) /* hv_process_channel_removal() needs this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) channel->rescind = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) vmbus_device_unregister(channel->device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* Note: the function can run concurrently for primary/sub channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static void vmbus_add_channel_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct vmbus_channel *newchannel =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) container_of(work, struct vmbus_channel, add_channel_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct vmbus_channel *primary_channel = newchannel->primary_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * This state is used to indicate a successful open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * so that when we do close the channel normally, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * can cleanup properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) newchannel->state = CHANNEL_OPEN_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (primary_channel != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /* newchannel is a sub-channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct hv_device *dev = primary_channel->device_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (vmbus_add_channel_kobj(dev, newchannel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) goto err_deq_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (primary_channel->sc_creation_callback != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) primary_channel->sc_creation_callback(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) newchannel->probe_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * Start the process of binding the primary channel to the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) newchannel->device_obj = vmbus_device_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) &newchannel->offermsg.offer.if_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) &newchannel->offermsg.offer.if_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (!newchannel->device_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) goto err_deq_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) newchannel->device_obj->device_id = newchannel->device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * Add the new device to the bus. This will kick off device-driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * binding which eventually invokes the device driver's AddDevice()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ret = vmbus_device_register(newchannel->device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) pr_err("unable to add child device object (relid %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) newchannel->offermsg.child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) kfree(newchannel->device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) goto err_deq_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) newchannel->probe_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) err_deq_chan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * We need to set the flag, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * vmbus_onoffer_rescind() can be blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) newchannel->probe_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (primary_channel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) list_del(&newchannel->listentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) list_del(&newchannel->sc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* vmbus_process_offer() has mapped the channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) vmbus_channel_unmap_relid(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) vmbus_release_relid(newchannel->offermsg.child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) free_channel(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * vmbus_process_offer - Process the offer by creating a channel/device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * associated with this offer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) static void vmbus_process_offer(struct vmbus_channel *newchannel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) struct vmbus_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) bool fnew = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) * Synchronize vmbus_process_offer() and CPU hotplugging:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * CPU1 CPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * [vmbus_process_offer()] [Hot removal of the CPU]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) * CPU_READ_LOCK CPUS_WRITE_LOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * LOAD cpu_online_mask SEARCH chn_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * STORE target_cpu LOAD target_cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * INSERT chn_list STORE cpu_online_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * Forbids: CPU1's LOAD from *not* seing CPU2's STORE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * CPU2's SEARCH from *not* seeing CPU1's INSERT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * CPU2's LOAD from *not* seing CPU1's STORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * Serializes the modifications of the chn_list list as well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * the accesses to next_numa_node_id in init_vp_index().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) init_vp_index(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) /* Remember the channels that should be cleaned up upon suspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * Now that we have acquired the channel_mutex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * we can release the potentially racing rescind thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) atomic_dec(&vmbus_connection.offer_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (guid_equal(&channel->offermsg.offer.if_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) &newchannel->offermsg.offer.if_type) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) guid_equal(&channel->offermsg.offer.if_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) &newchannel->offermsg.offer.if_instance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) fnew = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (fnew) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) list_add_tail(&newchannel->listentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) &vmbus_connection.chn_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * Check to see if this is a valid sub-channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (newchannel->offermsg.offer.sub_channel_index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * Don't call free_channel(), because newchannel->kobj
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * is not initialized yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) kfree(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * Process the sub-channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) newchannel->primary_channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) list_add_tail(&newchannel->sc_list, &channel->sc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) vmbus_channel_map_relid(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * vmbus_process_offer() mustn't call channel->sc_creation_callback()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * directly for sub-channels, because sc_creation_callback() ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * vmbus_open() may never get the host's response to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * OPEN_CHANNEL message (the host may rescind a channel at any time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * may not wake up the vmbus_open() as it's blocked due to a non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * vmbus_connection.offer_in_progress, and finally we have a deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * The above is also true for primary channels, if the related device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * drivers use sync probing mode by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * And, usually the handling of primary channels and sub-channels can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * depend on each other, so we should offload them to different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * and waits for all the sub-channels to appear, but the latter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * can't get the rtnl_lock and this blocks the handling of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * sub-channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) wq = fnew ? vmbus_connection.handle_primary_chan_wq :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) vmbus_connection.handle_sub_chan_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) queue_work(wq, &newchannel->add_channel_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * We use this state to statically distribute the channel interrupt load.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static int next_numa_node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * Starting with Win8, we can statically distribute the incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * channel interrupt load by binding a channel to VCPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * For pre-win8 hosts or non-performance critical channels we assign the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * VMBUS_CONNECT_CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * Starting with win8, performance critical channels will be distributed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * evenly among all the available NUMA nodes. Once the node is assigned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * we will assign the CPU based on a simple round robin scheme.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static void init_vp_index(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) bool perf_chn = hv_is_perf_channel(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) cpumask_var_t available_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct cpumask *alloced_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) u32 target_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if ((vmbus_proto_version == VERSION_WS2008) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * Prior to win8, all channel interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * delivered on VMBUS_CONNECT_CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * Also if the channel is not a performance critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * channel, bind it to VMBUS_CONNECT_CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * In case alloc_cpumask_var() fails, bind it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * VMBUS_CONNECT_CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) channel->target_cpu = VMBUS_CONNECT_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (perf_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) numa_node = next_numa_node_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (numa_node == nr_node_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) next_numa_node_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (cpumask_empty(cpumask_of_node(numa_node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) alloced_mask = &hv_context.hv_numa_map[numa_node];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (cpumask_weight(alloced_mask) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) cpumask_weight(cpumask_of_node(numa_node))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * We have cycled through all the CPUs in the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * reset the alloced map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) cpumask_clear(alloced_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) target_cpu = cpumask_first(available_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) cpumask_set_cpu(target_cpu, alloced_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) channel->target_cpu = target_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) free_cpumask_var(available_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) #define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) #define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) #define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) #define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) #define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static void vmbus_wait_for_unload(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) void *page_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct hv_message *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct vmbus_channel_message_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) u32 message_type, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * used for initial contact or to CPU0 depending on host version. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * we're crashing on a different CPU let's hope that IRQ handler on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * functional and vmbus_unload_response() will complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * vmbus_connection.unload_event. If not, the last thing we can do is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * read message pages for all CPUs directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Wait up to 100 seconds since an Azure host must writeback any dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * data in its disk cache before the VMbus UNLOAD request will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * complete. This flushing has been empirically observed to take up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * to 50 seconds in cases with a lot of dirty data, so allow additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * leeway and for inaccuracies in mdelay(). But eventually time out so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * that the panic path can't get hung forever in case the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * message isn't seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (completion_done(&vmbus_connection.unload_event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) goto completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct hv_per_cpu_context *hv_cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) = per_cpu_ptr(hv_context.cpu_context, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) page_addr = hv_cpu->synic_message_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) msg = (struct hv_message *)page_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) + VMBUS_MESSAGE_SINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) message_type = READ_ONCE(msg->header.message_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (message_type == HVMSG_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) hdr = (struct vmbus_channel_message_header *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) msg->u.payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) complete(&vmbus_connection.unload_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) vmbus_signal_eom(msg, message_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * Give a notice periodically so someone watching the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * serial output won't think it is completely hung.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!(i % UNLOAD_MSG_LOOPS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) pr_notice("Waiting for VMBus UNLOAD to complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) mdelay(UNLOAD_DELAY_UNIT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) pr_err("Continuing even though VMBus UNLOAD did not complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) completed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * maybe-pending messages on all CPUs to be able to receive new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * messages after we reconnect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct hv_per_cpu_context *hv_cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) = per_cpu_ptr(hv_context.cpu_context, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) page_addr = hv_cpu->synic_message_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) msg->header.message_type = HVMSG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * vmbus_unload_response - Handler for the unload response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * This is a global event; just wakeup the waiting thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Once we successfully unload, we can cleanup the monitor state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) complete(&vmbus_connection.unload_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) void vmbus_initiate_unload(bool crash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct vmbus_channel_message_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Pre-Win2012R2 hosts don't support reconnect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (vmbus_proto_version < VERSION_WIN8_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) init_completion(&vmbus_connection.unload_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) hdr.msgtype = CHANNELMSG_UNLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) !crash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * vmbus_initiate_unload() is also called on crash and the crash can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * happening in an interrupt context, where scheduling is impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!crash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) wait_for_completion(&vmbus_connection.unload_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) vmbus_wait_for_unload();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static void check_ready_for_resume_event(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * If all the old primary channels have been fixed up, then it's safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) complete(&vmbus_connection.ready_for_resume_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static void vmbus_setup_channel_state(struct vmbus_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct vmbus_channel_offer_channel *offer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Setup state for signalling the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (vmbus_proto_version != VERSION_WS2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) channel->is_dedicated_interrupt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) (offer->is_dedicated_interrupt != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) channel->sig_event = offer->connection_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) memcpy(&channel->offermsg, offer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) sizeof(struct vmbus_channel_offer_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) channel->monitor_grp = (u8)offer->monitorid / 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) channel->monitor_bit = (u8)offer->monitorid % 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) channel->device_id = hv_get_dev_type(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * find_primary_channel_by_offer - Get the channel object given the new offer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * This is only used in the resume path of hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static struct vmbus_channel *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct vmbus_channel *channel = NULL, *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) const guid_t *inst1, *inst2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Ignore sub-channel offers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (offer->offer.sub_channel_index != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) inst1 = &iter->offermsg.offer.if_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) inst2 = &offer->offer.if_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (guid_equal(inst1, inst2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) channel = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct vmbus_channel_offer_channel *offer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct vmbus_channel *oldchannel, *newchannel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) size_t offer_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) offer = (struct vmbus_channel_offer_channel *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) trace_vmbus_onoffer(offer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) oldchannel = find_primary_channel_by_offer(offer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (oldchannel != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * We're resuming from hibernation: all the sub-channel and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * hv_sock channels we had before the hibernation should have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * been cleaned up, and now we must be seeing a re-offered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * primary channel that we had before the hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * { Initially: channel relid = INVALID_RELID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * channels[valid_relid] = NULL }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * CPU1 CPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * [vmbus_onoffer()] [vmbus_device_release()]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * LOCK channel_mutex LOCK channel_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * STORE channel relid = valid_relid LOAD r1 = channel relid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * MAP_RELID channel if (r1 != INVALID_RELID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * UNLOCK channel_mutex UNMAP_RELID channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * UNLOCK channel_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * Forbids: r1 == valid_relid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * channels[valid_relid] == channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Note. r1 can be INVALID_RELID only for an hv_sock channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * None of the hv_sock channels which were present before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * suspend are re-offered upon the resume. See the WARN_ON()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * in hv_process_channel_removal().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) atomic_dec(&vmbus_connection.offer_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Fix up the relid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) oldchannel->offermsg.child_relid = offer->child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) offer_sz = sizeof(*offer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * This is not an error, since the host can also change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * the other field(s) of the offer, e.g. on WS RS5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * (Build 17763), the offer->connection_id of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * Mellanox VF vmbus device can change when the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * reoffers the device upon resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) pr_debug("vmbus offer changed: relid=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) offer->child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) print_hex_dump_debug("Old vmbus offer: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) DUMP_PREFIX_OFFSET, 16, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) &oldchannel->offermsg, offer_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) print_hex_dump_debug("New vmbus offer: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) DUMP_PREFIX_OFFSET, 16, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) offer, offer_sz, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* Fix up the old channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) vmbus_setup_channel_state(oldchannel, offer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* Add the channel back to the array of channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) vmbus_channel_map_relid(oldchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) check_ready_for_resume_event();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Allocate the channel object and save this offer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) newchannel = alloc_channel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!newchannel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) vmbus_release_relid(offer->child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) atomic_dec(&vmbus_connection.offer_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) pr_err("Unable to allocate channel object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) vmbus_setup_channel_state(newchannel, offer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) vmbus_process_offer(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static void check_ready_for_suspend_event(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * If all the sub-channels or hv_sock channels have been cleaned up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * then it's safe to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) complete(&vmbus_connection.ready_for_suspend_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * vmbus_onoffer_rescind - Rescind offer handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * We queue a work item to process this offer synchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct vmbus_channel_rescind_offer *rescind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct vmbus_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) bool clean_up_chan_for_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) rescind = (struct vmbus_channel_rescind_offer *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) trace_vmbus_onoffer_rescind(rescind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * The offer msg and the corresponding rescind msg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * from the host are guranteed to be ordered -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * offer comes in first and then the rescind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Since we process these events in work elements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * and with preemption, we may end up processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * the events out of order. We rely on the synchronization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * provided by offer_in_progress and by channel_mutex for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * ordering these events:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * { Initially: offer_in_progress = 1 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * CPU1 CPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * [vmbus_onoffer()] [vmbus_onoffer_rescind()]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * LOCK channel_mutex WAIT_ON offer_in_progress == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * DECREMENT offer_in_progress LOCK channel_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * STORE channels[] LOAD channels[]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * UNLOCK channel_mutex UNLOCK channel_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * We wait here until any channel offer is currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) channel = relid2channel(rescind->child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (channel == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * We failed in processing the offer message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * we would have cleaned up the relid in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * failure path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) is_sub_channel(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * Before setting channel->rescind in vmbus_rescind_cleanup(), we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * should make sure the channel callback is not running any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) vmbus_reset_channel_cb(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Now wait for offer handling to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) vmbus_rescind_cleanup(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) while (READ_ONCE(channel->probe_done) == false) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * We wait here until any channel offer is currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * At this point, the rescind handling can proceed safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (channel->device_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (channel->chn_rescind_callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) channel->chn_rescind_callback(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (clean_up_chan_for_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) check_ready_for_suspend_event();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * We will have to unregister this device from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * driver core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dev = get_device(&channel->device_obj->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) vmbus_device_unregister(channel->device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) } else if (channel->primary_channel != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Sub-channel is being rescinded. Following is the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * close sequence when initiated from the driveri (refer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * vmbus_close() for details):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * 1. Close all sub-channels first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * 2. Then close the primary channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (channel->state == CHANNEL_OPEN_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * The channel is currently not open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * it is safe for us to cleanup the channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) hv_process_channel_removal(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) complete(&channel->rescind_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* The "channel" may have been freed. Do not access it any longer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (clean_up_chan_for_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) check_ready_for_suspend_event();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) BUG_ON(!is_hvsock_channel(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* We always get a rescind msg when a connection is closed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) vmbus_device_unregister(channel->device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * vmbus_onoffers_delivered -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * This is invoked when all offers have been delivered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Nothing to do here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void vmbus_onoffers_delivered(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * vmbus_onopen_result - Open result handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * This is invoked when we received a response to our channel open request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * Find the matching request, copy the response and signal the requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct vmbus_channel_open_result *result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct vmbus_channel_message_header *requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct vmbus_channel_open_channel *openmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) result = (struct vmbus_channel_open_result *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) trace_vmbus_onopen_result(result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * Find the open msg, copy the result and signal/unblock the wait event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) requestheader =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) (struct vmbus_channel_message_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) openmsg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) (struct vmbus_channel_open_channel *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (openmsg->child_relid == result->child_relid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) openmsg->openid == result->openid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) memcpy(&msginfo->response.open_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) sizeof(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct vmbus_channel_open_result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) complete(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * vmbus_ongpadl_created - GPADL created handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * This is invoked when we received a response to our gpadl create request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Find the matching request, copy the response and signal the requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct vmbus_channel_gpadl_created *gpadlcreated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct vmbus_channel_message_header *requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct vmbus_channel_gpadl_header *gpadlheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) trace_vmbus_ongpadl_created(gpadlcreated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * Find the establish msg, copy the result and signal/unblock the wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) requestheader =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) (struct vmbus_channel_message_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) gpadlheader =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) (struct vmbus_channel_gpadl_header *)requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if ((gpadlcreated->child_relid ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) gpadlheader->child_relid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) (gpadlcreated->gpadl == gpadlheader->gpadl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) memcpy(&msginfo->response.gpadl_created,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) gpadlcreated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) sizeof(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct vmbus_channel_gpadl_created));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) complete(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * vmbus_ongpadl_torndown - GPADL torndown handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * This is invoked when we received a response to our gpadl teardown request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * Find the matching request, copy the response and signal the requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static void vmbus_ongpadl_torndown(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct vmbus_channel_gpadl_torndown *gpadl_torndown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct vmbus_channel_message_header *requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct vmbus_channel_gpadl_teardown *gpadl_teardown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) trace_vmbus_ongpadl_torndown(gpadl_torndown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * Find the open msg, copy the result and signal/unblock the wait event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) requestheader =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) (struct vmbus_channel_message_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) gpadl_teardown =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) (struct vmbus_channel_gpadl_teardown *)requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) memcpy(&msginfo->response.gpadl_torndown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) gpadl_torndown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) sizeof(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct vmbus_channel_gpadl_torndown));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) complete(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * vmbus_onversion_response - Version response handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * This is invoked when we received a response to our initiate contact request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * Find the matching request, copy the response and signal the requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static void vmbus_onversion_response(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct vmbus_channel_message_header *requestheader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct vmbus_channel_version_response *version_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) version_response = (struct vmbus_channel_version_response *)hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) trace_vmbus_onversion_response(version_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) requestheader =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) (struct vmbus_channel_message_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (requestheader->msgtype ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) CHANNELMSG_INITIATE_CONTACT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) memcpy(&msginfo->response.version_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) version_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) sizeof(struct vmbus_channel_version_response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) complete(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Channel message dispatch table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) const struct vmbus_channel_message_table_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) channel_message_table[CHANNELMSG_COUNT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) { CHANNELMSG_INVALID, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sizeof(struct vmbus_channel_offer_channel)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) sizeof(struct vmbus_channel_rescind_offer) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) { CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) { CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) sizeof(struct vmbus_channel_open_result)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) { CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) { CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) { CHANNELMSG_GPADL_BODY, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) sizeof(struct vmbus_channel_gpadl_created)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) { CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) sizeof(struct vmbus_channel_gpadl_torndown) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) { CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) { CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) sizeof(struct vmbus_channel_version_response)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) { CHANNELMSG_UNLOAD, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) { CHANNELMSG_18, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) { CHANNELMSG_19, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) { CHANNELMSG_20, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) { CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * vmbus_onmessage - Handler for channel protocol messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * This is invoked in the vmbus worker thread context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) trace_vmbus_on_message(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * out of bound and the message_handler pointer can not be NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) channel_message_table[hdr->msgtype].message_handler(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * vmbus_request_offers - Send a request to get all our pending offers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) int vmbus_request_offers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct vmbus_channel_message_header *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct vmbus_channel_msginfo *msginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) msginfo = kmalloc(sizeof(*msginfo) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) sizeof(struct vmbus_channel_message_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!msginfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) msg = (struct vmbus_channel_message_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) msg->msgtype = CHANNELMSG_REQUESTOFFERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) trace_vmbus_request_offers(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) pr_err("Unable to request offers - %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) kfree(msginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void invoke_sc_cb(struct vmbus_channel *primary_channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct list_head *cur, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct vmbus_channel *cur_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (primary_channel->sc_creation_callback == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) primary_channel->sc_creation_callback(cur_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) void (*sc_cr_cb)(struct vmbus_channel *new_sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) primary_channel->sc_creation_callback = sc_cr_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ret = !list_empty(&primary->sc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * Invoke the callback on sub-channel creation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * This will present a uniform interface to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * clients.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) invoke_sc_cb(primary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) void (*chn_rescind_cb)(struct vmbus_channel *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) channel->chn_rescind_callback = chn_rescind_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);