Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2009, Microsoft Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *   Haiyang Zhang <haiyangz@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *   Hank Janssen  <hjanssen@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/hyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <asm/mshyperv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "hyperv_vmbus.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * (because of the alignment requirement), however, the hypervisor only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * total size that the guest uses minus twice of the gap size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	case HV_GPADL_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 		return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 	case HV_GPADL_RING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 		/* The size of a ringbuffer must be page-aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 		BUG_ON(size % PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 		 * Two things to notice here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 		 * 1) We're processing two ring buffers as a unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 		 * the first guest-size page of each of the two ring buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 		 * So we effectively subtract out two guest-size pages, and add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 		 * back two Hyper-V size pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60)  * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61)  *                                 HV_HYP_PAGE) in a ring gpadl based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62)  *                                 offset in the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64)  * @offset: the offset (in bytes) where the send ringbuffer starts in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65)  *               virtual address space of the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	 * header (because of the alignment requirement), however, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	 * And to calculate the effective send offset in gpadl, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	 * substract this gap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83)  * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  *                  the gpadl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * @type: the type of the gpadl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * @kbuffer: the pointer to the gpadl in the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * @size: the total size (in bytes) of the gpadl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90)  *               virtual address space of the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91)  * @i: the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 				 u32 size, u32 send_offset, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	unsigned long delta = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	case HV_GPADL_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	case HV_GPADL_RING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 			delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		else if (i <= send_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119)  * vmbus_setevent- Trigger an event notification on the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  * channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) void vmbus_setevent(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	struct hv_monitor_page *monitorpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	trace_vmbus_setevent(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	 * For channels marked as in "low latency" mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	 * bypass the monitor page mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 		vmbus_send_interrupt(channel->offermsg.child_relid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		/* Get the child to parent monitor page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		monitorpage = vmbus_connection.monitor_pages[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		sync_set_bit(channel->monitor_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 			(unsigned long *)&monitorpage->trigger_group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 					[channel->monitor_grp].pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 		vmbus_set_event(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) EXPORT_SYMBOL_GPL(vmbus_setevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) /* vmbus_free_ring - drop mapping of ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) void vmbus_free_ring(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	hv_ringbuffer_cleanup(&channel->outbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	hv_ringbuffer_cleanup(&channel->inbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	if (channel->ringbuffer_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 		__free_pages(channel->ringbuffer_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 			     get_order(channel->ringbuffer_pagecount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 				       << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 		channel->ringbuffer_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) EXPORT_SYMBOL_GPL(vmbus_free_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) /* vmbus_alloc_ring - allocate and map pages for ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) int vmbus_alloc_ring(struct vmbus_channel *newchannel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 		     u32 send_size, u32 recv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	/* Allocate the ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	order = get_order(send_size + recv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 				GFP_KERNEL|__GFP_ZERO, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	newchannel->ringbuffer_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) /* Used for Hyper-V Socket: a guest client's connect() to the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 				  const guid_t *shv_host_servie_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	struct vmbus_channel_tl_connect_request conn_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	memset(&conn_msg, 0, sizeof(conn_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	conn_msg.host_service_id = *shv_host_servie_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	trace_vmbus_send_tl_connect_request(&conn_msg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213)  * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  Also, Hyper-V does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  * ACK such messages.  IOW we can't know when the host will stop interrupting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * VERSION_WIN10_V4_1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	struct vmbus_channel_modifychannel conn_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	memset(&conn_msg, 0, sizeof(conn_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	conn_msg.child_relid = child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	conn_msg.target_vp = target_vp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	trace_vmbus_send_modifychannel(&conn_msg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 *
 * Builds a CHANNELMSG_GPADL_HEADER message describing @kbuffer and, when
 * the PFN array does not fit in one VMBus message, a chain of
 * CHANNELMSG_GPADL_BODY sub-messages carrying the remaining PFNs.
 *
 * @type: BUFFER or RING gpadl; determines the size/PFN translation done
 *        by hv_gpadl_size()/hv_gpadl_hvpfn()
 * @kbuffer: the guest buffer the gpadl describes
 * @size: total size (in bytes) of @kbuffer
 * @send_offset: byte offset of the send ring (used for RING type)
 * @msginfo: out parameter; on success *msginfo is the allocated header
 *           message, with any body messages linked on its submsglist.
 *           The caller owns and must eventually free the whole chain.
 *
 * Return: 0 on success, -ENOMEM if an allocation fails (all messages
 * allocated so far are freed before returning).
 */
static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
			       u32 size, u32 send_offset,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	/* Number of Hyper-V-size pages the gpadl spans, as the host sees it. */
	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;

	/* do we need a gpadl body msg */
	/* pfncount = how many u64 PFNs fit in the header message's payload */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader =  kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		/* range_buflen covers ALL PFNs, not just those in the header */
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
		/* Header carries only the first pfncount PFNs. */
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
				type, kbuffer, size, send_offset, i);
		*msginfo = msgheader;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		/* (per body message, which has a smaller fixed overhead) */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 * (Bodies already queued on the header's
				 * submsglist from earlier iterations.)
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				/* msgbody is NULL here; nomem frees msgheader */
				goto nomem;
			}

			msgbody->msgsize = msgsize;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
					kbuffer, size, send_offset, pfnsum + i);

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
				type, kbuffer, size, send_offset, i);

		*msginfo = msgheader;
	}

	return 0;
nomem:
	/* kfree(NULL) is a no-op, so both pointers are safe to pass here. */
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381)  * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383)  * @channel: a channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384)  * @type: the type of the corresponding GPADL, only meaningful for the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385)  * @kbuffer: from kmalloc or vmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386)  * @size: page-size multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387)  * @send_offset: the offset (in bytes) where the send ring buffer starts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388)  * 		 should be 0 for BUFFER type gpadl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389)  * @gpadl_handle: some funky thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 				   enum hv_gpadl_type type, void *kbuffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 				   u32 size, u32 send_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 				   u32 *gpadl_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	struct vmbus_channel_gpadl_header *gpadlmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	struct vmbus_channel_gpadl_body *gpadl_body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	struct vmbus_channel_msginfo *msginfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	struct vmbus_channel_msginfo *submsginfo, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	struct list_head *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	u32 next_gpadl_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	next_gpadl_handle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	init_completion(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	msginfo->waiting_channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	gpadlmsg->child_relid = channel->offermsg.child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	gpadlmsg->gpadl = next_gpadl_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	list_add_tail(&msginfo->msglistentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		      &vmbus_connection.chn_msg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	if (channel->rescind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 			     sizeof(*msginfo), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	list_for_each(curr, &msginfo->submsglist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		submsginfo = (struct vmbus_channel_msginfo *)curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		gpadl_body =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		gpadl_body->header.msgtype =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 			CHANNELMSG_GPADL_BODY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		gpadl_body->gpadl = next_gpadl_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		ret = vmbus_post_msg(gpadl_body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				     submsginfo->msgsize - sizeof(*submsginfo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 				     true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		trace_vmbus_establish_gpadl_body(gpadl_body, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	wait_for_completion(&msginfo->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (msginfo->response.gpadl_created.creation_status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		pr_err("Failed to establish GPADL: err = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		       msginfo->response.gpadl_created.creation_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		ret = -EDQUOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (channel->rescind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/* At this point, we received the gpadl created msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	*gpadl_handle = gpadlmsg->gpadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	list_del(&msginfo->msglistentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 				 msglistentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		kfree(submsginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	kfree(msginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)  * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493)  * @channel: a channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * @kbuffer: from kmalloc or vmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  * @size: page-size multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * @gpadl_handle: some funky thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			  u32 size, u32 *gpadl_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 				       0U, gpadl_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) static int __vmbus_open(struct vmbus_channel *newchannel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		       void *userdata, u32 userdatalen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		       void (*onchannelcallback)(void *context), void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	struct vmbus_channel_open_channel *open_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	struct vmbus_channel_msginfo *open_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	struct page *page = newchannel->ringbuffer_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	u32 send_pages, recv_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	if (userdatalen > MAX_USER_DEFINED_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	send_pages = newchannel->ringbuffer_send_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	if (newchannel->state != CHANNEL_OPEN_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	newchannel->state = CHANNEL_OPENING_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	newchannel->onchannel_callback = onchannelcallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	newchannel->channel_callback_context = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		goto error_clean_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	err = hv_ringbuffer_init(&newchannel->inbound,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 				 &page[send_pages], recv_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		goto error_clean_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	/* Establish the gpadl for the ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	newchannel->ringbuffer_gpadlhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 				      page_address(newchannel->ringbuffer_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 				      (send_pages + recv_pages) << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 				      &newchannel->ringbuffer_gpadlhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		goto error_clean_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	/* Create and init the channel open message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	open_info = kmalloc(sizeof(*open_info) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			   sizeof(struct vmbus_channel_open_channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	if (!open_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		goto error_free_gpadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	init_completion(&open_info->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	open_info->waiting_channel = newchannel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	open_msg->openid = newchannel->offermsg.child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	open_msg->child_relid = newchannel->offermsg.child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	 * here we calculate it into HV_HYP_PAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	open_msg->downstream_ringbuffer_pageoffset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if (userdatalen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		memcpy(open_msg->userdata, userdata, userdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	list_add_tail(&open_info->msglistentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		      &vmbus_connection.chn_msg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (newchannel->rescind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		goto error_clean_msglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	err = vmbus_post_msg(open_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 			     sizeof(struct vmbus_channel_open_channel), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	trace_vmbus_open(open_msg, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		goto error_clean_msglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	wait_for_completion(&open_info->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	list_del(&open_info->msglistentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	if (newchannel->rescind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		goto error_free_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	if (open_info->response.open_result.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		goto error_free_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	newchannel->state = CHANNEL_OPENED_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	kfree(open_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) error_clean_msglist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	list_del(&open_info->msglistentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) error_free_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	kfree(open_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) error_free_gpadl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	newchannel->ringbuffer_gpadlhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) error_clean_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	hv_ringbuffer_cleanup(&newchannel->outbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	hv_ringbuffer_cleanup(&newchannel->inbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	newchannel->state = CHANNEL_OPEN_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634)  * vmbus_connect_ring - Open the channel but reuse ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) int vmbus_connect_ring(struct vmbus_channel *newchannel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		       void (*onchannelcallback)(void *context), void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) EXPORT_SYMBOL_GPL(vmbus_connect_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  * vmbus_open - Open the specified channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) int vmbus_open(struct vmbus_channel *newchannel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	       void *userdata, u32 userdatalen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	       void (*onchannelcallback)(void *context), void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			       recv_ringbuffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	err = __vmbus_open(newchannel, userdata, userdatalen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			   onchannelcallback, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		vmbus_free_ring(newchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) EXPORT_SYMBOL_GPL(vmbus_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  * vmbus_teardown_gpadl -Teardown the specified GPADL handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	struct vmbus_channel_gpadl_teardown *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	struct vmbus_channel_msginfo *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	info = kmalloc(sizeof(*info) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	init_completion(&info->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	info->waiting_channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	msg->child_relid = channel->offermsg.child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	msg->gpadl = gpadl_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	list_add_tail(&info->msglistentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		      &vmbus_connection.chn_msg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	if (channel->rescind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		goto post_msg_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			     true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	trace_vmbus_teardown_gpadl(msg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		goto post_msg_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	wait_for_completion(&info->waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) post_msg_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	 * If the channel has been rescinded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	 * we will be awakened by the rescind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	 * handler; set the error code to zero so we don't leak memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (channel->rescind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	list_del(&info->msglistentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) void vmbus_reset_channel_cb(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * vmbus_on_event(), running in the per-channel tasklet, can race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 * the former is accessing channel->inbound.ring_buffer, the latter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	 * could be freeing the ring_buffer pages, so here we must stop it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	 * first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * vmbus_chan_sched() might call the netvsc driver callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 * that ends up scheduling NAPI work that accesses the ring buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	 * At this point, we have to ensure that any such work is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	 * and that the channel ring buffer is no longer being accessed, cf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	 * the calls to napi_disable() in netvsc_device_remove().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	tasklet_disable(&channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	/* See the inline comments in vmbus_chan_sched(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	spin_lock_irqsave(&channel->sched_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	channel->onchannel_callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	spin_unlock_irqrestore(&channel->sched_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	channel->sc_creation_callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	/* Re-enable tasklet for use on re-open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	tasklet_enable(&channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static int vmbus_close_internal(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct vmbus_channel_close_channel *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	vmbus_reset_channel_cb(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * In case a device driver's probe() fails (e.g.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	 * rescinded later (e.g., we dynamically disable an Integrated Service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	 * here we should skip most of the below cleanup work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	if (channel->state != CHANNEL_OPENED_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	channel->state = CHANNEL_OPEN_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	/* Send a closing message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	msg = &channel->close_msg.msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	msg->child_relid = channel->offermsg.child_relid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			     true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	trace_vmbus_close_internal(msg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		pr_err("Close failed: close post msg return is %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		 * If we failed to post the close msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		 * it is perhaps better to leak memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/* Tear down the gpadl for the channel's ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	else if (channel->ringbuffer_gpadlhandle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		ret = vmbus_teardown_gpadl(channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 					   channel->ringbuffer_gpadlhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			pr_err("Close failed: teardown gpadl return %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			 * If we failed to teardown gpadl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			 * it is perhaps better to leak memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		channel->ringbuffer_gpadlhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) /* disconnect ring - close all channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) int vmbus_disconnect_ring(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct vmbus_channel *cur_channel, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (channel->primary_channel != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (cur_channel->rescind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			wait_for_completion(&cur_channel->rescind_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		if (vmbus_close_internal(cur_channel) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			vmbus_free_ring(cur_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			if (cur_channel->rescind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 				hv_process_channel_removal(cur_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 * Now close the primary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	mutex_lock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	ret = vmbus_close_internal(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	mutex_unlock(&vmbus_connection.channel_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * vmbus_close - Close the specified channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) void vmbus_close(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (vmbus_disconnect_ring(channel) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		vmbus_free_ring(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) EXPORT_SYMBOL_GPL(vmbus_close);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * vmbus_sendpacket() - Send the specified buffer on the given channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * @channel: Pointer to vmbus_channel structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * @buffer: Pointer to the buffer you want to send the data from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * @bufferlen: Maximum size of what the buffer holds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * @requestid: Identifier of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * @type: Type of packet that is being sent e.g. negotiate, time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *	  packet etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * Sends data in @buffer directly to Hyper-V via the vmbus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * This will send the data unparsed to Hyper-V.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * Mainly used by Hyper-V drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			   u32 bufferlen, u64 requestid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			   enum vmbus_packet_type type, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct vmpacket_descriptor desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct kvec bufferlist[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	u64 aligned_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* Setup the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	desc.type = type; /* VmbusPacketTypeDataInBand; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	/* in 8-bytes granularity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	desc.len8 = (u16)(packetlen_aligned >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	desc.trans_id = requestid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	bufferlist[0].iov_base = &desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	bufferlist[1].iov_base = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	bufferlist[1].iov_len = bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	bufferlist[2].iov_base = &aligned_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) EXPORT_SYMBOL(vmbus_sendpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * packets using a GPADL Direct packet type. This interface allows you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * to control notifying the host. This will be useful for sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * batched data. Also the sender can control the send flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				struct hv_page_buffer pagebuffers[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 				u32 pagecount, void *buffer, u32 bufferlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 				u64 requestid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	struct vmbus_channel_packet_page_buffer desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	u32 descsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	u32 packetlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	u32 packetlen_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	struct kvec bufferlist[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	u64 aligned_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * largest size we support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			  sizeof(struct hv_page_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	packetlen = descsize + bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* Setup the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	desc.length8 = (u16)(packetlen_aligned >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	desc.transactionid = requestid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	desc.reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	desc.rangecount = pagecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	for (i = 0; i < pagecount; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		desc.range[i].len = pagebuffers[i].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		desc.range[i].offset = pagebuffers[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		desc.range[i].pfn	 = pagebuffers[i].pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	bufferlist[0].iov_base = &desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	bufferlist[0].iov_len = descsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	bufferlist[1].iov_base = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	bufferlist[1].iov_len = bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	bufferlist[2].iov_base = &aligned_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	return hv_ringbuffer_write(channel, bufferlist, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * using a GPADL Direct packet type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * The buffer includes the vmbus descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			      struct vmbus_packet_mpb_array *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			      u32 desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			      void *buffer, u32 bufferlen, u64 requestid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	u32 packetlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	u32 packetlen_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct kvec bufferlist[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	u64 aligned_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	packetlen = desc_size + bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	/* Setup the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	desc->length8 = (u16)(packetlen_aligned >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	desc->transactionid = requestid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	desc->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	desc->rangecount = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	bufferlist[0].iov_base = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	bufferlist[0].iov_len = desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	bufferlist[1].iov_base = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	bufferlist[1].iov_len = bufferlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	bufferlist[2].iov_base = &aligned_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return hv_ringbuffer_write(channel, bufferlist, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  * @channel: Pointer to vmbus_channel structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * @buffer: Pointer to the buffer you want to receive the data into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * @bufferlen: Maximum size of what the buffer can hold.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * @buffer_actual_len: The actual size of the data after it was received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * @requestid: Identifier of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * @raw: true means keep the vmpacket_descriptor header in the received data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * Receives directly from the hyper-v vmbus and puts the data it received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * into Buffer. This will receive the data unparsed from hyper-v.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * Mainly used by Hyper-V drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		   bool raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	return hv_ringbuffer_read(channel, buffer, bufferlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				  buffer_actual_len, requestid, raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		     u32 bufferlen, u32 *buffer_actual_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		     u64 *requestid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	return __vmbus_recvpacket(channel, buffer, bufferlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 				  buffer_actual_len, requestid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) EXPORT_SYMBOL(vmbus_recvpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			      u32 bufferlen, u32 *buffer_actual_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			      u64 *requestid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return __vmbus_recvpacket(channel, buffer, bufferlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				  buffer_actual_len, requestid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);