Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include <asm/mshyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */



/*
 * Protocol versions. The low word is the minor version, the high word the major
 * version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
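
/*
 * Illustrative note (not part of the original file): with the encoding
 * above, DYNMEM_PROTOCOL_VERSION_3 is DYNMEM_MAKE_VERSION(2, 0), i.e.
 * 0x00020000, so DYNMEM_MAJOR_VERSION(0x00020000) == 2 and
 * DYNMEM_MINOR_VERSION(0x00020000) == 0. Note that DYNMEM_MINOR_VERSION()
 * masks with 0xff, so only the low 8 bits of the 16-bit minor word are
 * recovered.
 */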



/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;


union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
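
/*
 * Illustrative note (not part of the original file): hot_add_alignment is
 * an exponent, so a guest that needs its hot-added ranges aligned to
 * 128 MB would report hot_add_alignment = 7 (2^7 MB = 128 MB), which
 * matches the 128 MB HA_CHUNK granularity this driver hot-adds memory in
 * (see the HA_CHUNK definition below).
 */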

union dm_mem_page_range {
	struct  {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64  page_range;
} __packed;
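
/*
 * Illustrative note (not part of the original file): a range of 512 pages
 * starting at PFN 0x100000 would be encoded as finfo.start_page = 0x100000
 * and finfo.page_cnt = 512; assuming the usual little-endian bitfield
 * layout, the packed value is page_range == (512ULL << 40) | 0x100000.
 */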



/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and indicates
 * if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *		   in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *	    calculated as File Cache Page Fault Count - Page Read Count.
 *	    This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
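
/*
 * Illustrative note (not part of the original file): range_array[] is a
 * flexible array member, so a balloon response carrying N ranges would set
 * hdr.size to sizeof(struct dm_balloon_response) +
 * N * sizeof(union dm_mem_page_range), i.e. 16 + 8 * N bytes with the
 * packed layout above (hdr.size includes the header, as noted in the
 * dm_header description).
 */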

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M;
 * it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
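
/*
 * Usage note (not part of the original file): both parameters are declared
 * with S_IRUGO | S_IWUSR, so they appear as root-writable files under
 * /sys/module/<module>/parameters/ and can also be set at load/boot time,
 * e.g. something like "hv_balloon.hot_add=0" on the kernel command line
 * (the "hv_balloon" module name is assumed from KBUILD_MODNAME).
 */
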
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = 20 * 1024;

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};


static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
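
/*
 * Illustrative note (not part of the original file): assuming 4 KiB pages,
 * PAGES_IN_2M evaluates to 2 MB / 4 KB = 512 pages and HA_CHUNK to
 * 128 MB / 4 KB = 32768 pages, i.e. memory is hot-added in the 128 MB
 * chunks described in the hv_hotadd_state comment above.
 */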

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion  ol_waitevent;
	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regard to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, num_pages_onlined counter and individual
	 * regions from ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;

	/*
	 * The negotiated version agreed with the host.
	 */
	__u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
				     unsigned long pfn)
{
	struct hv_hotadd_gap *gap;

	/* The page is not backed. */
	if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
		return false;

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
			return false;
	}

	return true;
}

static unsigned long hv_page_offline_check(unsigned long start_pfn,
					   unsigned long nr_pages)
{
	unsigned long pfn = start_pfn, count = 0;
	struct hv_hotadd_state *has;
	bool found;

	while (pfn < start_pfn + nr_pages) {
		/*
		 * Search for the HAS that covers the pfn and, when we find
		 * one, count how many consecutive PFNs are covered.
		 */
		found = false;
		list_for_each_entry(has, &dm_device.ha_region_list, list) {
			while ((pfn >= has->start_pfn) &&
			       (pfn < has->end_pfn) &&
			       (pfn < start_pfn + nr_pages)) {
				found = true;
				if (has_pfn_is_backed(has, pfn))
					count++;
				pfn++;
			}
		}

		/*
		 * This PFN is not in any HAS (e.g. we're offlining a region
		 * which was present at boot), no need to account for it. Go
		 * to the next one.
		 */
		if (!found)
			pfn++;
	}

	return count;
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags, pfn_count;

	switch (val) {
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		complete(&dm_device.ol_waitevent);
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		pfn_count = hv_page_offline_check(mem->start_pfn,
						  mem->nr_pages);
		if (pfn_count <= dm_device.num_pages_onlined) {
			dm_device.num_pages_onlined -= pfn_count;
		} else {
			/*
			 * We're offlining more pages than we managed to online.
			 * This is unexpected. In any case don't let
			 * num_pages_onlined wrap around zero.
			 */
			WARN_ON_ONCE(1);
			dm_device.num_pages_onlined = 0;
		}
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
		if (!PageOffline(pg))
			__SetPageOffline(pg);
		return;
	}
	if (PageOffline(pg))
		__ClearPageOffline(pg);

	/* This frame is currently backed; online the page. */
	generic_online_page(pg, 0);

	lockdep_assert_held(&dm_device.ha_lock);
	dm_device.num_pages_onlined++;
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
				unsigned long pfn_count,
				struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn +=  HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn +=  processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		reinit_completion(&dm_device.ol_waitevent);

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE);

		if (ret) {
			pr_err("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the failure
				 * is not transient. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -=  processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for memory to get onlined. If the kernel onlined the
		 * memory when adding it, this will return directly. Otherwise,
		 * it will wait for user space to online the memory. This helps
		 * to avoid adding memory faster than it is getting onlined. As
		 * adding succeeded, it is ok to proceed even if the memory was
		 * not onlined in time.
		 */
		wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
		post_status(&dm_device);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void hv_online_page(struct page *pg, unsigned int order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	struct hv_hotadd_state *has;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	unsigned long pfn = page_to_pfn(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		/* The page belongs to a different HAS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		if ((pfn < has->start_pfn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 				(pfn + (1UL << order) > has->end_pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		hv_bring_pgs_online(has, pfn, 1UL << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct hv_hotadd_state *has;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct hv_hotadd_gap *gap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	unsigned long residual, new_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		 * If the pfn range we are dealing with is not in the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		 * "hot add block", move on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		 * If the current start pfn is not where the covered_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		 * is, create a gap and update covered_end_pfn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		if (has->covered_end_pfn != start_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			if (!gap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			INIT_LIST_HEAD(&gap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			gap->start_pfn = has->covered_end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			gap->end_pfn = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			list_add_tail(&gap->list, &has->gap_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			has->covered_end_pfn = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		 * If the current hot-add request extends beyond
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		 * our current limit, extend it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		if ((start_pfn + pfn_cnt) > has->end_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			residual = (start_pfn + pfn_cnt - has->end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			 * Extend the region by multiples of HA_CHUNK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			if (residual % HA_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				new_inc += HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			has->end_pfn += new_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) }
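The HA_CHUNK round-up used when pfn_covered() extends a region's end_pfn (and again in handle_pg_range() and hot_add_req() below) is plain integer arithmetic. The following is a minimal userspace sketch of just that step, assuming HA_CHUNK works out to 32 * 1024 pfns (one 128 MiB section with 4 KiB pages); both that value and the sample residual are illustrative, not taken from a running system.

/* Userspace sketch: round a residual pfn count up to whole HA_CHUNK units. */
#include <stdio.h>

#define HA_CHUNK (32UL * 1024)	/* assumed: one 128 MiB section of 4 KiB pfns */

static unsigned long round_up_to_ha_chunk(unsigned long residual)
{
	unsigned long new_inc = (residual / HA_CHUNK) * HA_CHUNK;

	if (residual % HA_CHUNK)
		new_inc += HA_CHUNK;
	return new_inc;
}

int main(void)
{
	/*
	 * 0x9000 pfns (144 MiB) is not HA_CHUNK-aligned, so end_pfn would
	 * grow by two chunks: 0x10000 pfns (256 MiB).
	 */
	printf("%#lx -> %#lx\n", 0x9000UL, round_up_to_ha_chunk(0x9000UL));
	return 0;
}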
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) static unsigned long handle_pg_range(unsigned long pg_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 					unsigned long pg_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	unsigned long start_pfn = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned long pfn_cnt = pg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct hv_hotadd_state *has;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	unsigned long pgs_ol = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned long old_covered_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	unsigned long res = 0, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		pg_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		 * If the pfn range we are dealing with is not in the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		 * "hot add block", move on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		old_covered_state = has->covered_end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		if (start_pfn < has->ha_end_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			 * This is the case where we are backing pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			 * in an already hot-added region. Bring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			 * these pages online first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			pgs_ol = has->ha_end_pfn - start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			if (pgs_ol > pfn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				pgs_ol = pfn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			has->covered_end_pfn +=  pgs_ol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			pfn_cnt -= pgs_ol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			 * Check if the corresponding memory block is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			 * online. It is possible to observe struct pages still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			 * being uninitialized here, so check the section instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			 * If the section is online, we need to bring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			 * rest of the pfns (which were not backed previously)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			 * online too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			if (start_pfn > has->start_pfn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			    online_section_nr(pfn_to_section_nr(start_pfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 				hv_bring_pgs_online(has, start_pfn, pgs_ol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			 * We have some residual hot-add range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			 * that still needs to be hot-added; do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			 * it now. Hot-add a multiple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			 * HA_CHUNK that fully covers the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			 * we have.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			size = (has->end_pfn - has->ha_end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			if (pfn_cnt <= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				if (pfn_cnt % HA_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 					size += HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 				pfn_cnt = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		 * If we managed to online any pages that were given to us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		 * we declare success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		res = has->covered_end_pfn - old_covered_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static unsigned long process_hot_add(unsigned long pg_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 					unsigned long pfn_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 					unsigned long rg_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 					unsigned long rg_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	struct hv_hotadd_state *ha_region = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	int covered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (pfn_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (!dm_device.host_specified_ha_region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		covered = pfn_covered(pg_start, pfn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (covered < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		if (covered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			goto do_pg_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 * If the host has specified a hot-add range, deal with it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (rg_size != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		if (!ha_region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		INIT_LIST_HEAD(&ha_region->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		INIT_LIST_HEAD(&ha_region->gap_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		ha_region->start_pfn = rg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		ha_region->ha_end_pfn = rg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		ha_region->covered_start_pfn = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		ha_region->covered_end_pfn = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		ha_region->end_pfn = rg_start + rg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) do_pg_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * Process the specified page range, bringing the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * online if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	return handle_pg_range(pg_start, pfn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static void hot_add_req(struct work_struct *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct dm_hot_add_response resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	unsigned long pg_start, pfn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	unsigned long rg_start, rg_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct hv_dynmem_device *dm = &dm_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	memset(&resp, 0, sizeof(struct dm_hot_add_response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	resp.hdr.size = sizeof(struct dm_hot_add_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		unsigned long region_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		unsigned long region_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		 * The host has not specified the hot-add region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		 * Based on the hot-add page range being specified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		 * compute a hot-add region that can cover the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		 * that need to be hot-added while ensuring the alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		 * and size requirements of Linux as it relates to hot-add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		region_start = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (pfn_cnt % HA_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			region_size += HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		rg_start = region_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		rg_sz = region_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (do_hot_add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		resp.page_count = process_hot_add(pg_start, pfn_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 						rg_start, rg_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	dm->num_pages_added += resp.page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * The result field of the response structure has the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 * following semantics:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * 1. If all or some pages hot-added: Guest should return success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 * 2. If no pages could be hot-added:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * If the guest returns success, then the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * will not attempt any further hot-add operations. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 * signifies a permanent failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * If the guest returns failure, then this failure will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * treated as a transient failure and the host may retry the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * hot-add operation after some delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (resp.page_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		resp.result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	else if (!do_hot_add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		resp.result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		resp.result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (!do_hot_add || resp.page_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		if (!allow_hibernation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			pr_err("Memory hot add failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			pr_info("Ignore hot-add request!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	dm->state = DM_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	vmbus_sendpacket(dm->dev->channel, &resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			sizeof(struct dm_hot_add_response),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			(unsigned long)NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
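When the host leaves the hot-add region unspecified, hot_add_req() above derives one by rounding the requested page count up and aligning the start pfn down to a HA_CHUNK boundary. A minimal userspace sketch of that derivation with made-up numbers (same HA_CHUNK assumption as the earlier sketch):

/* Userspace sketch of the region derivation in hot_add_req(). */
#include <stdio.h>

#define HA_CHUNK (32UL * 1024)	/* assumed: one 128 MiB section of 4 KiB pfns */

int main(void)
{
	unsigned long pg_start = 0x10a000, pfn_cnt = 0x3000;	/* illustrative */
	unsigned long region_start, region_size;

	/* Size: round the requested page count up to whole HA_CHUNK units. */
	region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
	if (pfn_cnt % HA_CHUNK)
		region_size += HA_CHUNK;

	/* Start: align the first pfn down to a HA_CHUNK boundary. */
	region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

	/* Prints "region [0x108000, +0x8000 pfns)" for these inputs. */
	printf("region [%#lx, +%#lx pfns)\n", region_start, region_size);
	return 0;
}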
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct dm_info_header *info_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	info_hdr = (struct dm_info_header *)msg->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	switch (info_hdr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	case INFO_TYPE_MAX_PAGE_CNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		if (info_hdr->data_size == sizeof(__u64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			__u64 *max_page_count = (__u64 *)&info_hdr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			pr_info("Max. dynamic memory size: %llu MB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 				(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		pr_warn("Received Unknown type: %d\n", info_hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
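The shift in the pr_info() above converts a page count to MiB. A tiny userspace sketch of the same conversion, assuming the hypervisor page size is 4 KiB (HV_HYP_PAGE_SHIFT == 12) and using an illustrative page count:

/* Userspace sketch of the pages-to-MB conversion used in process_info(). */
#include <stdio.h>

#define HV_HYP_PAGE_SHIFT 12	/* assumed: 4 KiB hypervisor pages */

int main(void)
{
	unsigned long long max_page_count = 1048576;	/* illustrative value */

	/* 2^20 bytes per MiB, 2^HV_HYP_PAGE_SHIFT bytes per page. */
	printf("Max. dynamic memory size: %llu MB\n",
	       max_page_count >> (20 - HV_HYP_PAGE_SHIFT));	/* 4096 MB */
	return 0;
}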
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static unsigned long compute_balloon_floor(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	unsigned long min_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	unsigned long nr_pages = totalram_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	/* Simple continuous piecewise linear function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	 *  max MiB -> min MiB  gradient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	 *       0         0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	 *      16        16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	 *      32        24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	 *     128        72    (1/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 *     512       168    (1/4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 *    2048       360    (1/8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 *    8192       744    (1/16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 *   32768      1512	(1/32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (nr_pages < MB2PAGES(128))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		min_pages = MB2PAGES(8) + (nr_pages >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	else if (nr_pages < MB2PAGES(512))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		min_pages = MB2PAGES(40) + (nr_pages >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	else if (nr_pages < MB2PAGES(2048))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		min_pages = MB2PAGES(104) + (nr_pages >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	else if (nr_pages < MB2PAGES(8192))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		min_pages = MB2PAGES(232) + (nr_pages >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		min_pages = MB2PAGES(488) + (nr_pages >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #undef MB2PAGES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return min_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
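The table in the comment above can be reproduced directly. Below is a minimal userspace sketch of the same piecewise-linear floor, working in MiB instead of pages; the per-branch constants are copied from the function, and the 4096 MiB input is just an extra illustrative point.

/* Userspace sketch of compute_balloon_floor(), with sizes in MiB. */
#include <stdio.h>

static unsigned long floor_mb(unsigned long total_mb)
{
	if (total_mb < 128)
		return 8 + total_mb / 2;
	if (total_mb < 512)
		return 40 + total_mb / 4;
	if (total_mb < 2048)
		return 104 + total_mb / 8;
	if (total_mb < 8192)
		return 232 + total_mb / 16;
	return 488 + total_mb / 32;
}

int main(void)
{
	/* Matches the comment's table: 2048 -> 360, 8192 -> 744. */
	printf("2048 MiB -> %lu MiB floor\n", floor_mb(2048));
	printf("8192 MiB -> %lu MiB floor\n", floor_mb(8192));
	printf("4096 MiB -> %lu MiB floor\n", floor_mb(4096));
	return 0;
}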
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * Post our status, as it relates to memory pressure, to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * host. The host expects the guests to post this status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * periodically at 1 second intervals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * The metrics specified in this protocol are very Windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * specific, so we cook up numbers here to convey our memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void post_status(struct hv_dynmem_device *dm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	struct dm_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	unsigned long last_post = last_post_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (pressure_report_delay > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		--pressure_report_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (!time_after(now, (last_post_time + HZ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	memset(&status, 0, sizeof(struct dm_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	status.hdr.type = DM_STATUS_REPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	status.hdr.size = sizeof(struct dm_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	status.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 * The host expects the guest to report free and committed memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	 * Furthermore, the host expects the pressure information to include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	 * the ballooned out pages. For a given amount of memory that we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * managing, we need to compute a floor below which we should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 * balloon. Compute this and add it to the pressure report.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	 * We also need to report all offline pages (num_pages_added -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	 * num_pages_onlined) as committed to the host, otherwise it can try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * asking us to balloon them out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	status.num_avail = si_mem_available();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	status.num_committed = vm_memory_committed() +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		dm->num_pages_ballooned +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		(dm->num_pages_added > dm->num_pages_onlined ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		 dm->num_pages_added - dm->num_pages_onlined : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		compute_balloon_floor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	trace_balloon_status(status.num_avail, status.num_committed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			     vm_memory_committed(), dm->num_pages_ballooned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			     dm->num_pages_added, dm->num_pages_onlined);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	 * If our transaction ID is no longer current, just don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	 * send the status. This can happen if we were interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	 * after we picked our transaction ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (status.hdr.trans_id != atomic_read(&trans_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	 * If the last post time that we sampled has changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	 * we have raced, don't post the status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (last_post != last_post_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	last_post_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	vmbus_sendpacket(dm->dev->channel, &status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 				sizeof(struct dm_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 				(unsigned long)NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 				VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
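The committed figure reported above is the sum of four terms: the kernel's own committed-memory estimate, the pages currently ballooned out, any hot-added pages that were never onlined, and the balloon floor. A tiny userspace sketch with made-up page counts (all values illustrative):

/* Userspace sketch of the num_committed calculation in post_status(). */
#include <stdio.h>

int main(void)
{
	unsigned long committed_as = 200000;	/* vm_memory_committed()   */
	unsigned long ballooned    = 50000;	/* dm->num_pages_ballooned */
	unsigned long added        = 80000;	/* dm->num_pages_added     */
	unsigned long onlined      = 60000;	/* dm->num_pages_onlined   */
	unsigned long floor        = 30000;	/* compute_balloon_floor() */
	unsigned long num_committed;

	num_committed = committed_as + ballooned +
			(added > onlined ? added - onlined : 0) + floor;

	printf("num_committed = %lu pages\n", num_committed);	/* 300000 */
	return 0;
}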
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void free_balloon_pages(struct hv_dynmem_device *dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			 union dm_mem_page_range *range_array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	int num_pages = range_array->finfo.page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	__u64 start_frame = range_array->finfo.start_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	for (i = 0; i < num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		pg = pfn_to_page(i + start_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		__ClearPageOffline(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		__free_page(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		dm->num_pages_ballooned--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 					unsigned int num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 					struct dm_balloon_response *bl_resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 					int alloc_unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	for (i = 0; i < num_pages / alloc_unit; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			HV_HYP_PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			return i * alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		 * We execute this code in a thread context. Furthermore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		 * we don't want the kernel to try too hard.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				__GFP_NOMEMALLOC | __GFP_NOWARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 				get_order(alloc_unit << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		if (!pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			return i * alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		dm->num_pages_ballooned += alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * If we allocated 2M pages, split them so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 * can free them in any order we get.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		if (alloc_unit != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		/* mark all pages offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			__SetPageOffline(pg + j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		bl_resp->range_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		bl_resp->range_array[i].finfo.start_page =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			page_to_pfn(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	return i * alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
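alloc_balloon_pages() above asks the page allocator for one higher-order block per 2 MiB unit and then split_page()s it so the pages can later be freed individually, in any order the host returns them. A small userspace sketch of the order arithmetic, assuming 4 KiB base pages so that PAGES_IN_2M is 512:

/* Userspace sketch: allocation order for one 2 MiB balloon unit. */
#include <stdio.h>

#define PAGES_IN_2M 512		/* assumed: 4 KiB base pages */

/*
 * Smallest order whose block of 2^order pages covers 'pages' pages,
 * mirroring what get_order(alloc_unit << PAGE_SHIFT) computes.
 */
static int order_for(unsigned long pages)
{
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	int order = order_for(PAGES_IN_2M);

	/* One order-9 block = 512 pages, later split into 512 order-0 pages. */
	printf("2 MiB unit -> order %d (%lu pages)\n", order, 1UL << order);
	return 0;
}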
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void balloon_up(struct work_struct *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	unsigned int num_ballooned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct dm_balloon_response *bl_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	int alloc_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	long avail_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	unsigned long floor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	 * We will attempt 2M allocations. However, if we fail to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	alloc_unit = PAGES_IN_2M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	avail_pages = si_mem_available();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	floor = compute_balloon_floor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/* Refuse to balloon below the floor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		pr_info("Balloon request will be partially fulfilled. %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			avail_pages < num_pages ? "Not enough memory." :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			"Balloon floor reached.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	while (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		bl_resp->more_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		num_pages -= num_ballooned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 						    bl_resp, alloc_unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		if (alloc_unit != 1 && num_ballooned == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			alloc_unit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		if (num_ballooned == 0 || num_ballooned == num_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			pr_debug("Ballooned %u out of %u requested pages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 				num_pages, dm_device.balloon_wrk.num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			bl_resp->more_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			dm_device.state = DM_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		 * We are pushing a lot of data through the channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		 * deal with transient failures caused by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		 * lack of space in the ring buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			ret = vmbus_sendpacket(dm_device.dev->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 						bl_resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 						bl_resp->hdr.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 						(unsigned long)NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 						VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			post_status(&dm_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		} while (ret == -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			 * Free up the memory we allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			pr_err("Balloon response failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			for (i = 0; i < bl_resp->range_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				free_balloon_pages(&dm_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 						 &bl_resp->range_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static void balloon_down(struct hv_dynmem_device *dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			struct dm_unballoon_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	union dm_mem_page_range *range_array = req->range_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	int range_count = req->range_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	struct dm_unballoon_response resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	for (i = 0; i < range_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		free_balloon_pages(dm, &range_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		complete(&dm_device.config_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	pr_debug("Freed %u ballooned pages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		prev_pages_ballooned - dm->num_pages_ballooned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (req->more_pages == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	memset(&resp, 0, sizeof(struct dm_unballoon_response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	resp.hdr.type = DM_UNBALLOON_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	resp.hdr.size = sizeof(struct dm_unballoon_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	vmbus_sendpacket(dm_device.dev->channel, &resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				sizeof(struct dm_unballoon_response),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 				(unsigned long)NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	dm->state = DM_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static void balloon_onchannelcallback(void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int dm_thread_func(void *dm_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	struct hv_dynmem_device *dm = dm_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		wait_for_completion_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 						&dm_device.config_event, 1*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		 * The host expects us to post information on the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		 * pressure every second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		reinit_completion(&dm_device.config_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		post_status(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) static void version_resp(struct hv_dynmem_device *dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			struct dm_version_response *vresp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	struct dm_version_request version_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	if (vresp->is_accepted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		 * We are done; wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		 * context waiting for version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		 * negotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		complete(&dm->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	 * If there are more versions to try, continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	 * with negotiations; if not,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	 * shut down the service since we are not able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	 * to negotiate a suitable version number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	 * with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (dm->next_version == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		goto version_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	memset(&version_req, 0, sizeof(struct dm_version_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	version_req.hdr.type = DM_VERSION_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	version_req.hdr.size = sizeof(struct dm_version_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	version_req.version.version = dm->next_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	dm->version = version_req.version.version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 * Set the next version to try in case current version fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	 * Win7 protocol ought to be the last one to try.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	switch (version_req.version.version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	case DYNMEM_PROTOCOL_VERSION_WIN8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		version_req.is_last_attempt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		dm->next_version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		version_req.is_last_attempt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				sizeof(struct dm_version_request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 				(unsigned long)NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 				VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		goto version_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) version_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	dm->state = DM_INIT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	complete(&dm->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static void cap_resp(struct hv_dynmem_device *dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			struct dm_capabilities_resp_msg *cap_resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (!cap_resp->is_accepted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		pr_err("Capabilities not accepted by host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		dm->state = DM_INIT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	complete(&dm->host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static void balloon_onchannelcallback(void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	struct hv_device *dev = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	u32 recvlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	u64 requestid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	struct dm_message *dm_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	struct dm_header *dm_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	struct dm_balloon *bal_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct dm_hot_add *ha_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	union dm_mem_page_range *ha_pg_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	union dm_mem_page_range *ha_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	memset(recv_buffer, 0, sizeof(recv_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	vmbus_recvpacket(dev->channel, recv_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (recvlen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		dm_msg = (struct dm_message *)recv_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		dm_hdr = &dm_msg->hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		switch (dm_hdr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		case DM_VERSION_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			version_resp(dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 				 (struct dm_version_response *)dm_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		case DM_CAPABILITIES_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			cap_resp(dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 				 (struct dm_capabilities_resp_msg *)dm_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		case DM_BALLOON_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			if (allow_hibernation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 				pr_info("Ignore balloon-up request!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			if (dm->state == DM_BALLOON_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 				pr_warn("Currently ballooning\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			bal_msg = (struct dm_balloon *)recv_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			dm->state = DM_BALLOON_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			schedule_work(&dm_device.balloon_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		case DM_UNBALLOON_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			if (allow_hibernation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 				pr_info("Ignore balloon-down request!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			dm->state = DM_BALLOON_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			balloon_down(dm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 				 (struct dm_unballoon_request *)recv_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		case DM_MEM_HOT_ADD_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			if (dm->state == DM_HOT_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 				pr_warn("Currently hot-adding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			dm->state = DM_HOT_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			ha_msg = (struct dm_hot_add *)recv_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				 * This is a normal hot-add request specifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 				 * hot-add memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 				dm->host_specified_ha_region = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				ha_pg_range = &ha_msg->range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 				dm->ha_wrk.ha_page_range = *ha_pg_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 				dm->ha_wrk.ha_region_range.page_range = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 				 * Host is specifying that we first hot-add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 				 * a region and then partially populate this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 				 * region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 				dm->host_specified_ha_region = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 				ha_pg_range = &ha_msg->range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 				ha_region = &ha_pg_range[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 				dm->ha_wrk.ha_page_range = *ha_pg_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 				dm->ha_wrk.ha_region_range = *ha_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			schedule_work(&dm_device.ha_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		case DM_INFO_MESSAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			process_info(dm, (struct dm_info_msg *)dm_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static int balloon_connect_vsp(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	struct dm_version_request version_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	struct dm_capabilities cap_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	unsigned long t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			 balloon_onchannelcallback, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	 * Initiate the handshake with the host and negotiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	 * a version that the host can support. We start with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 * highest version number and go down if the host cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 * support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	memset(&version_req, 0, sizeof(struct dm_version_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	version_req.hdr.type = DM_VERSION_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	version_req.hdr.size = sizeof(struct dm_version_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
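	/*
	 * This is not the last attempt: older protocol versions can still be
	 * offered if the host rejects this one.
	 */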
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	version_req.is_last_attempt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	dm_device.version = version_req.version.version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	ret = vmbus_sendpacket(dev->channel, &version_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			       sizeof(struct dm_version_request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (t == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	 * If we could not negotiate a compatible version with the host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	 * fail the probe function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	if (dm_device.state == DM_INIT_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	pr_info("Using Dynamic Memory protocol version %u.%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		DYNMEM_MAJOR_VERSION(dm_device.version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		DYNMEM_MINOR_VERSION(dm_device.version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	 * Now submit our capabilities to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	cap_msg.hdr.size = sizeof(struct dm_capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * currently still requires the bits to be set, so we set them here and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 * instead fail the host's hot-add and balloon up/down requests, if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	cap_msg.caps.cap_bits.balloon = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	cap_msg.caps.cap_bits.hot_add = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	 * Specify our alignment requirements as they relate to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	 * memory hot-add. Specify 128MB alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	 */
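	/* The value is the log2 of the alignment in megabytes: 2^7 MB = 128 MB. */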
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	cap_msg.caps.cap_bits.hot_add_alignment = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	 * Currently the host does not use these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	 * values; we set them to match what the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	 * Windows driver does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	cap_msg.min_page_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	cap_msg.max_page_number = -1;
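	/* All bits set, i.e. effectively no upper limit on the page number. */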
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	ret = vmbus_sendpacket(dev->channel, &cap_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			       sizeof(struct dm_capabilities),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (t == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	 * If the host rejects our capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	 * fail the probe function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	if (dm_device.state == DM_INIT_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	vmbus_close(dev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static int balloon_probe(struct hv_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			 const struct hv_vmbus_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
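	/*
	 * Memory hot-add does not mix well with hibernation: hot-added memory
	 * cannot be handled properly across a hibernation/resume cycle, so
	 * disable it when the VM may hibernate.
	 */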
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	allow_hibernation = hv_is_hibernation_supported();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (allow_hibernation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		hot_add = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	do_hot_add = hot_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	do_hot_add = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	dm_device.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	dm_device.state = DM_INITIALIZING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	init_completion(&dm_device.host_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	init_completion(&dm_device.config_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	INIT_LIST_HEAD(&dm_device.ha_region_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	spin_lock_init(&dm_device.ha_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	dm_device.host_specified_ha_region = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) #ifdef CONFIG_MEMORY_HOTPLUG
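	/*
	 * Use a driver-specific online-page callback so that pages in
	 * partially populated hot-add regions are onlined correctly.
	 */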
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	set_online_page_callback(&hv_online_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	init_completion(&dm_device.ol_waitevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	register_memory_notifier(&hv_memory_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	hv_set_drvdata(dev, &dm_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	ret = balloon_connect_vsp(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	dm_device.state = DM_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	dm_device.thread =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	if (IS_ERR(dm_device.thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		ret = PTR_ERR(dm_device.thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		goto probe_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) probe_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	dm_device.state = DM_INIT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	dm_device.thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	vmbus_close(dev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	unregister_memory_notifier(&hv_memory_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	restore_online_page_callback(&hv_online_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int balloon_remove(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	struct hv_hotadd_state *has, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	struct hv_hotadd_gap *gap, *tmp_gap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (dm->num_pages_ballooned != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	cancel_work_sync(&dm->balloon_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	cancel_work_sync(&dm->ha_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	kthread_stop(dm->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	vmbus_close(dev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	unregister_memory_notifier(&hv_memory_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	restore_online_page_callback(&hv_online_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) #endif
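	/* Free all tracked hot-add regions and their gap descriptors. */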
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	spin_lock_irqsave(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			list_del(&gap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			kfree(gap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		list_del(&has->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		kfree(has);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static int balloon_suspend(struct hv_device *hv_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
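	/*
	 * Keep the channel callback from running while the work items and
	 * the balloon thread are being torn down.
	 */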
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	tasklet_disable(&hv_dev->channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	cancel_work_sync(&dm->balloon_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	cancel_work_sync(&dm->ha_wrk.wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (dm->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		kthread_stop(dm->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		dm->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		vmbus_close(hv_dev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	tasklet_enable(&hv_dev->channel->callback_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static int balloon_resume(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	dm_device.state = DM_INITIALIZING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	ret = balloon_connect_vsp(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	dm_device.thread =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (IS_ERR(dm_device.thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		ret = PTR_ERR(dm_device.thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		dm_device.thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		goto close_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	dm_device.state = DM_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) close_channel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	vmbus_close(dev->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	dm_device.state = DM_INIT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	unregister_memory_notifier(&hv_memory_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	restore_online_page_callback(&hv_online_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static const struct hv_vmbus_device_id id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	/* Dynamic Memory Class ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	/* 525074DC-8985-46e2-8057-A307DC18A502 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	{ HV_DM_GUID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) MODULE_DEVICE_TABLE(vmbus, id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static struct hv_driver balloon_drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	.name = "hv_balloon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	.id_table = id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	.probe = balloon_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	.remove = balloon_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	.suspend = balloon_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	.resume = balloon_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static int __init init_balloon_drv(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	return vmbus_driver_register(&balloon_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) module_init(init_balloon_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) MODULE_DESCRIPTION("Hyper-V Balloon");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) MODULE_LICENSE("GPL");