Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

Source listing from commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests.  The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during an inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated with any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
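
/*
 * With the usual 4 KiB base pages and 2 MiB PMD mappings (PAGE_SHIFT == 12,
 * PMD_SHIFT == 21), VMW_BALLOON_2M_ORDER above evaluates to 9, i.e. one
 * "2M" balloon page covers 2^9 = 512 contiguous 4 KiB frames.
 */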

/*
 * 64-bit targets are only supported in 64-bit builds.
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are available.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
 *				  pages that are about to be deflated from the
 *				  balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *				       pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
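
/*
 * For every command whose bit is set in VMW_BALLOON_CMD_WITH_TARGET_MASK
 * above, the hypervisor reports the current balloon target in its result;
 * __vmballoon_cmd() uses this to refresh b->target after a successful call.
 */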

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
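
/*
 * With 4 KiB base pages (PAGE_SHIFT == 12) the bit-fields above add up to
 * 5 + 7 + 52 = 64 bits, so each entry occupies a single u64 and one batch
 * page holds PAGE_SIZE / sizeof(u64) = 512 entries, matching the "up to
 * 512" limit documented for the batched lock/unlock commands.
 */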

struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem.
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support sizes bigger than 32 bits, use
	 * 64 bits in preparation for future support.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use READ_ONCE()/WRITE_ONCE() for accesses
	 * and accept the less optimized code. Although we may read a stale
	 * target value if multiple accesses happen at once, the performance
	 * impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail because the reset did not take place. Clearing the flag is done
	 * while holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked in each
	 * operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock.
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: handle of the VMCI doorbell through which the
	 * hypervisor notifies the guest when the balloon size changes.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock.
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker shrinker;

	/**
	 * @shrinker_registered: whether the shrinker was registered.
	 *
	 * The shrinker interface does not gracefully handle the removal of a
	 * shrinker that was never registered. This flag simplifies the
	 * unregistration process.
	 */
	bool shrinker_registered;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

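	/*
	 * Backdoor call to the hypervisor: the constraints below place the
	 * magic number in %eax, the command in %ecx, the port number in %edx,
	 * arg1 in %ebx and arg2 in %esi. On return %eax holds the status and
	 * %ebx the command-specific result; for VMW_BALLOON_CMD_START the
	 * capabilities come back in %ecx instead, as handled below.
	 */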
	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is disabled
	 * for some reason, do not use 2MB pages, since otherwise the legacy
	 * mechanism would be used with 2MB pages and cause a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of the
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, EINVAL if the limit does not fit in 32 bits, as
 * required by the host-guest protocol, and EIO if an error occurred while
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32 bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages pages. Adds them to the list of balloon
 * pages in @ctl.pages and updates @ctl.n_pages to reflect the number of
 * pages that were actually allocated.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected by the
		 * hypervisor during inflation and is then split into 4KB
		 * pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 *
 * Return: zero on success, or -EIO if the hypervisor refused the page.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on in the @p argument.
 *
 * Return: the status of a lock or unlock operation for an individual page.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 					   struct page **p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (static_branch_likely(&vmw_balloon_batching)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		/* batching mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		*p = pfn_to_page(b->batch_page[idx].pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		return b->batch_page[idx].status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* non-batching mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	*p = b->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	 * If a failure occurs, the indication will be provided in the status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * of the entire operation, which is considered before the individual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 * page status. So for non-batching mode, the indication is always of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	 * success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	return VMW_BALLOON_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  * vmballoon_lock_op - notifies the host about inflated/deflated pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  * @num_pages: number of inflated/deflated pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * @page_size: size of the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * @op: the type of operation (lock or unlock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * Notify the host about page(s) that were ballooned (or removed from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * balloon) so that the host can use them without fear that the guest will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  * need them (or stop using them, since the VM does). The host may reject some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * pages; we need to check the return value and maybe submit a different page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * The pages that are inflated/deflated are pointed to by @b->page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * Return: result as provided by the hypervisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static unsigned long vmballoon_lock_op(struct vmballoon *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				       unsigned int num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				       enum vmballoon_page_size_type page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				       enum vmballoon_op op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	unsigned long cmd, pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	lockdep_assert_held(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (static_branch_likely(&vmw_balloon_batching)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		if (op == VMW_BALLOON_INFLATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			cmd = page_size == VMW_BALLOON_2M_PAGE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 				VMW_BALLOON_CMD_BATCHED_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			cmd = page_size == VMW_BALLOON_2M_PAGE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				VMW_BALLOON_CMD_BATCHED_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 						  VMW_BALLOON_CMD_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		pfn = page_to_pfn(b->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		/* In non-batching mode, PFNs must fit in 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (unlikely(pfn != (u32)pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			return VMW_BALLOON_ERROR_PPN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	return vmballoon_cmd(b, cmd, pfn, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * vmballoon_add_page - adds a page towards lock/unlock operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * @idx: index of the page to be ballooned in this batch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * @p: pointer to the page that is about to be ballooned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Adds the page to be ballooned. Must be called while holding @comm_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			       struct page *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	lockdep_assert_held(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (static_branch_likely(&vmw_balloon_batching))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		b->batch_page[idx] = (struct vmballoon_batch_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 					{ .pfn = page_to_pfn(p) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		b->page = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  * vmballoon_lock - lock or unlock a batch of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  * Notifies the host about ballooned pages (after inflation or deflation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  * according to @ctl). If the host rejects a page, it is put on the @ctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * refused-pages list. These refused pages are then released when moving to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * the next page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * Note that we neither free the refused pages here nor put them back on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * ballooned pages list. Instead, we queue them for later processing. We do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * that for several reasons. First, we do not want to free pages under the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  * lock. Second, it allows us to unify the handling of lock and unlock. In the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * inflate case, the caller will check whether there are too many refused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * pages and release them. Although this is not identical to the past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * behavior, it should not affect performance.
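 *
 * Return: zero on success, or -EIO if the whole batch operation failed.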
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	unsigned long batch_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	unsigned int i, num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	num_pages = ctl->n_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (num_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	/* communication with the host is done under the communication lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	spin_lock(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	list_for_each_entry(page, &ctl->pages, lru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		vmballoon_add_page(b, i++, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 					 ctl->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * Iterate over the pages in the provided list. Since we are changing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * @ctl->n_pages we are saving the original value in @num_pages and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * use this value to bound the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	for (i = 0; i < num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		unsigned long status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		status = vmballoon_status_page(b, i, &page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 * Failure of the whole batch overrides the results of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 * individual operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (batch_status != VMW_BALLOON_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			status = batch_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		/* Continue if no error happened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 						 status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		 * Error happened. Move the pages to the refused list and update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		 * the pages number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		list_move(&page->lru, &ctl->refused_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		ctl->n_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		ctl->n_refused_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	spin_unlock(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * vmballoon_release_page_list() - Releases a page list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * @page_list: list of pages to release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * @n_pages: pointer to the number of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * @page_size: whether the pages in the list are 2MB (or else 4KB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * Releases the list of pages and zeros the number of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static void vmballoon_release_page_list(struct list_head *page_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				       int *n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				       enum vmballoon_page_size_type page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct page *page, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	list_for_each_entry_safe(page, tmp, page_list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		list_del(&page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		__free_pages(page, vmballoon_page_order(page_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (n_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		*n_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * Release pages that were allocated while attempting to inflate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  * balloon but were refused by the host for one reason or another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static void vmballoon_release_refused_pages(struct vmballoon *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 					    struct vmballoon_ctl *ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				 ctl->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				    ctl->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * vmballoon_change - retrieve the required balloon change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * @b: pointer for the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * Return: the required change for the balloon size. A positive number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * indicates inflation, a negative number indicates deflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static int64_t vmballoon_change(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	int64_t size, target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	size = atomic64_read(&b->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	target = READ_ONCE(b->target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * We must read the values into signed 64-bit variables first; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * the subtraction might yield a huge positive instead of a negative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (b->reset_required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* consider a 2MB slack on deflate, unless the balloon is emptied */
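	/*
	 * Rough worked example (assuming a 4 KiB base frame, so a 2MB page
	 * spans 512 frames): with size == 10000 and target == 9800 the gap
	 * is 200 frames, below the 512-frame slack, so no deflation is
	 * triggered. With target == 0 the slack is ignored and a full
	 * deflation is requested.
	 */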
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (target < size && target != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	/* If an out-of-memory recently occurred, inflation is disallowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	return target - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * @b: pointer to balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * @pages: list of pages to enqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * @n_pages: pointer to number of pages in list. The value is zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * @page_size: whether the pages are 2MB or 4KB pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * Enqueues the provided list of pages on the ballooned page list, clears the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * list and zeroes the number of pages that was provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static void vmballoon_enqueue_page_list(struct vmballoon *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 					struct list_head *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 					unsigned int *n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 					enum vmballoon_page_size_type page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (page_size == VMW_BALLOON_4K_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		balloon_page_list_enqueue(&b->b_dev_info, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		 * Keep the huge pages in a local list which is not available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 * for the balloon compaction mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		list_for_each_entry(page, pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		list_splice_init(pages, &b->huge_pages);
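		/*
		 * BALLOON_INFLATE is accounted in base-size frames, so the
		 * number of 2MB pages is scaled by the frames-per-huge-page
		 * factor (512 when the base page is 4 KiB).
		 */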
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		__count_vm_events(BALLOON_INFLATE, *n_pages *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	*n_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * @b: pointer to balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * @pages: list onto which the dequeued pages are added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * @n_pages: pointer to the number of pages; set to the number dequeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * @page_size: whether the pages are 2MB or 4KB pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * @n_req_pages: the number of requested pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * Dequeues the number of requested pages from the balloon for deflation. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * number of dequeued pages may be lower, if not enough pages of the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  * size are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static void vmballoon_dequeue_page_list(struct vmballoon *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 					struct list_head *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 					unsigned int *n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 					enum vmballoon_page_size_type page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 					unsigned int n_req_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct page *page, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/* In the case of 4k pages, use the compaction infrastructure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (page_size == VMW_BALLOON_4K_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 						     n_req_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	/* 2MB pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		list_move(&page->lru, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		if (++i == n_req_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	__count_vm_events(BALLOON_DEFLATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	*n_pages = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * due to one or a few 4KB pages. Such 2MB pages may keep being allocated and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * then refused. To prevent this case, this function splits the refused pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * into 4KB pages and adds them to the @prealloc_pages list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct page *page, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	unsigned int i, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	order = vmballoon_page_order(ctl->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
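	/*
	 * Each refused huge page is split in place: with 4 KiB base pages
	 * (an assumption here), a 2MB page has order 9, so split_page()
	 * yields 512 order-0 pages that are queued on @prealloc_pages for
	 * the subsequent 4KB inflation pass.
	 */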
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		list_del(&page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		split_page(page, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		for (i = 0; i < (1 << order); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			list_add(&page[i].lru, &ctl->prealloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	ctl->n_refused_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * vmballoon_inflate() - Inflate the balloon towards its target size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static void vmballoon_inflate(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	int64_t to_inflate_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	struct vmballoon_ctl ctl = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		.pages = LIST_HEAD_INIT(ctl.pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		.page_size = b->max_page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		.op = VMW_BALLOON_INFLATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		unsigned int to_inflate_pages, page_in_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		int alloc_error, lock_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		VM_BUG_ON(!list_empty(&ctl.pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		VM_BUG_ON(ctl.n_pages != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
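		/*
		 * For example (assuming 2MB pages and a 4 KiB base frame, so
		 * page_in_frames == 512): a required change of 10000 frames
		 * needs DIV_ROUND_UP(10000, 512) == 20 huge pages, further
		 * capped by @b->batch_max_pages.
		 */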
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 					 DIV_ROUND_UP_ULL(to_inflate_frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 							  page_in_frames));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		/* Start by allocating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		alloc_error = vmballoon_alloc_page_list(b, &ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 							to_inflate_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		/* Actually lock the pages by telling the hypervisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		lock_error = vmballoon_lock(b, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		 * If an error indicates that something serious went wrong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		 * stop the inflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		if (lock_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		/* Update the balloon size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		atomic64_add(ctl.n_pages * page_in_frames, &b->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 					    ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		 * If allocation failed or the number of refused pages exceeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		 * the maximum allowed, move to the next page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (alloc_error ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			 * Split the refused pages to 4k. This will also empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			 * the refused pages list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			vmballoon_split_refused_pages(&ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			ctl.page_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	 * Release pages that were allocated while attempting to inflate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	 * balloon but were refused by the host for one reason or another,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	 * and update the statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (ctl.n_refused_pages != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		vmballoon_release_refused_pages(b, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * vmballoon_deflate() - Decrease the size of the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * @b: pointer to the balloon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * @n_frames: the number of frames to deflate. If zero, automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * calculated according to the target size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * @coordinated: whether to coordinate with the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * Decrease the size of the balloon, allowing the guest to use more memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * Return: The number of deflated frames (i.e., basic page size units)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 				       bool coordinated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	unsigned long deflated_frames = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	unsigned long tried_frames = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct vmballoon_ctl ctl = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		.pages = LIST_HEAD_INIT(ctl.pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		.page_size = VMW_BALLOON_4K_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		.op = VMW_BALLOON_DEFLATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	/* free pages to reach target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		unsigned int to_deflate_pages, n_unlocked_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		unsigned int page_in_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		int64_t to_deflate_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		bool deflated_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		VM_BUG_ON(!list_empty(&ctl.pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		VM_BUG_ON(ctl.n_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		VM_BUG_ON(!list_empty(&ctl.refused_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		VM_BUG_ON(ctl.n_refused_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		 * If a specific number of frames was requested, we try to deflate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		 * that many frames. Otherwise, deflation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		 * performed according to the target and balloon size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		to_deflate_frames = n_frames ? n_frames - tried_frames :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 					       -vmballoon_change(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		/* break if no work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		if (to_deflate_frames <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 * Calculate the number of pages based on the current page size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 * but limit the deflated pages to a single batch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 					 DIV_ROUND_UP_ULL(to_deflate_frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 							  page_in_frames));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		/* First take the pages from the balloon pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 					    ctl.page_size, to_deflate_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		 * Before pages are moved to the refused list, count their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		 * frames as frames that we tried to deflate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		tried_frames += ctl.n_pages * page_in_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		 * Unlock the pages by communicating with the hypervisor if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		 * communication is coordinated (i.e., not pop). We ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		 * return code. Instead we check whether we managed to unlock all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		 * the pages. If we failed, we will move to the next page size and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		 * eventually try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		if (coordinated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			vmballoon_lock(b, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		 * Check if we deflated enough. We will move to the next page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		 * size if we did not manage to do so. This calculation takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		 * place now, as once the pages are released, the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		 * pages is zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		deflated_all = (ctl.n_pages == to_deflate_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		/* Update local and global counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		n_unlocked_frames = ctl.n_pages * page_in_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		atomic64_sub(n_unlocked_frames, &b->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		deflated_frames += n_unlocked_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 					 ctl.page_size, ctl.n_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		/* free the ballooned pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 					    ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		/* Return the refused pages to the ballooned list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 					    &ctl.n_refused_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 					    ctl.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		/* If we failed to unlock all the pages, move to next size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		if (!deflated_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			if (ctl.page_size == b->max_page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			ctl.page_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	return deflated_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  * vmballoon_deinit_batching - disables batching mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * @b: pointer to &struct vmballoon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * Disables batching, by deallocating the page for communication with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * hypervisor and disabling the static key to indicate that batching is off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static void vmballoon_deinit_batching(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	free_page((unsigned long)b->batch_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	b->batch_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	static_branch_disable(&vmw_balloon_batching);
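	/*
	 * Without batching, the lock/unlock hypercalls carry a single PFN,
	 * so only one page can be processed per command.
	 */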
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	b->batch_max_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  * vmballoon_init_batching - enable batching mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  * @b: pointer to &struct vmballoon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * Enables batching, by allocating a page for communication with the hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * and enabling the static_key to use batching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * Return: zero on success or an appropriate error-code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static int vmballoon_init_batching(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	b->batch_page = page_address(page);
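	/*
	 * One communication page holds PAGE_SIZE / sizeof(entry) batch
	 * entries; with 4 KiB pages and 8-byte entries (an assumption based
	 * on the batch entry layout) that is 512 pages per hypercall.
	 */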
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	static_branch_enable(&vmw_balloon_batching);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  * Receive notification and resize balloon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static void vmballoon_doorbell(void *client_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct vmballoon *b = client_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)  * Clean up vmci doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void vmballoon_vmci_cleanup(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		      VMCI_INVALID_ID, VMCI_INVALID_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		vmci_doorbell_destroy(b->vmci_doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		b->vmci_doorbell = VMCI_INVALID_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  * vmballoon_vmci_init - Initialize vmci doorbell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * Return: zero on success or when the wakeup command is not supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * an error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * Initialize vmci doorbell, to get notified as soon as balloon changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int vmballoon_vmci_init(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	unsigned long error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				     vmballoon_doorbell, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (error != VMCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 				b->vmci_doorbell.context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				b->vmci_doorbell.resource, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (error != VMW_BALLOON_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	vmballoon_vmci_cleanup(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  * vmballoon_pop - Quickly release all pages allocated for the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  * This function is called when the host decides to "reset" the balloon for one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  * reason or another. Unlike a normal "deflate", we do not (and shall not)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  * notify the host of the pages being released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static void vmballoon_pop(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	while ((size = atomic64_read(&b->size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		vmballoon_deflate(b, size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  * Perform standard reset sequence by popping the balloon (in case it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  * is not empty) and then restarting the protocol. This operation normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void vmballoon_reset(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	down_write(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	vmballoon_vmci_cleanup(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	/* free all pages, skipping monitor unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	vmballoon_pop(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		if (vmballoon_init_batching(b)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			 * We failed to initialize batching; inform the monitor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			 * about it by sending a null capability.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			 * The guest will retry in one second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			vmballoon_send_start(b, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		vmballoon_deinit_batching(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	b->reset_required = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	error = vmballoon_vmci_init(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		pr_err("failed to initialize vmci doorbell\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (vmballoon_send_guest_id(b))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		pr_err("failed to send guest ID to the host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	up_write(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * @work: pointer to the &work_struct which is provided by the workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * Resets the protocol if needed, gets the new size and adjusts the balloon as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  * needed. Repeats every second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static void vmballoon_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct delayed_work *dwork = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	int64_t change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (b->reset_required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		vmballoon_reset(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	down_read(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	 * Update the stats while holding the semaphore to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	 * @stats_enabled is consistent with whether the stats are actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 * enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!vmballoon_send_get_target(b))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		change = vmballoon_change(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	if (change != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		pr_debug("%s - size: %llu, target %lu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			 atomic64_read(&b->size), READ_ONCE(b->target));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		if (change > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			vmballoon_inflate(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		else  /* (change < 0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			vmballoon_deflate(b, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	up_read(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	 * We are using a freezable workqueue so that balloon operations are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	 * stopped while the system transitions to/from sleep/hibernation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	queue_delayed_work(system_freezable_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			   dwork, round_jiffies_relative(HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  * @shrinker: pointer to the balloon shrinker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  * @sc: page reclaim information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  * Return: number of pages that were freed during deflation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 					     struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	struct vmballoon *b = &balloon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	unsigned long deflated_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	pr_debug("%s - size: %llu\n", __func__, atomic64_read(&b->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	 * If the lock is also contended for read, we cannot easily reclaim and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	 * we bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	if (!down_read_trylock(&b->conf_sem))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 				deflated_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 * Delay future inflation for some time to mitigate the situations in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 * the access is asynchronous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	up_read(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	return deflated_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * vmballoon_shrinker_count() - return the number of ballooned pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  * @shrinker: pointer to the balloon shrinker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * @sc: page reclaim information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  * Return: number of 4k pages that are allocated for the balloon and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  *	    therefore be reclaimed under pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 					      struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	struct vmballoon *b = &balloon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	return atomic64_read(&b->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
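/**
 * vmballoon_unregister_shrinker() - unregister the balloon memory shrinker.
 * @b: pointer to the balloon.
 *
 * Safe to call even if the shrinker was never registered; the
 * @shrinker_registered flag guards the actual unregistration.
 */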
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static void vmballoon_unregister_shrinker(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if (b->shrinker_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		unregister_shrinker(&b->shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	b->shrinker_registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
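/**
 * vmballoon_register_shrinker() - register the balloon as a memory shrinker.
 * @b: pointer to the balloon.
 *
 * Lets the guest deflate the balloon under its own memory pressure,
 * independently of the host's target. Does nothing and returns zero unless
 * vmwballoon_shrinker_enable is set.
 *
 * Return: zero on success or the error returned by register_shrinker().
 */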
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) static int vmballoon_register_shrinker(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	/* Do nothing if the shrinker is not enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (!vmwballoon_shrinker_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	b->shrinker.scan_objects = vmballoon_shrinker_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	b->shrinker.count_objects = vmballoon_shrinker_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	b->shrinker.seeks = DEFAULT_SEEKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	r = register_shrinker(&b->shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	if (r == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		b->shrinker_registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * DEBUGFS Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static const char * const vmballoon_stat_page_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static const char * const vmballoon_stat_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	[VMW_BALLOON_STAT_TIMER]		= "timer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	[VMW_BALLOON_STAT_RESET]		= "reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
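/**
 * vmballoon_enable_stats() - lazily allocate and enable statistics collection.
 * @b: pointer to the balloon.
 *
 * Allocates the statistics structure and flips the static key that the
 * stat-update helpers check. Called on the first read of the debugfs entry.
 *
 * Return: zero on success, -ENOMEM if the allocation failed.
 */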
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static int vmballoon_enable_stats(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	down_write(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	/* did we somehow race with another reader which enabled stats? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (b->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (!b->stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		/* allocation failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	static_key_enable(&balloon_stat_enabled.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	up_write(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * vmballoon_debug_show - shows statistics of balloon operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  * @f: pointer to the &struct seq_file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  * @offset: ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)  * Provides the statistics that can be accessed in vmmemctl in the debugfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  * To avoid the overhead - mainly that of memory - of collecting the statistics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  * we only collect statistics after the first time the counters are read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * Return: zero on success or an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static int vmballoon_debug_show(struct seq_file *f, void *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	struct vmballoon *b = f->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	/* enables stats if they are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	if (!b->stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		int r = vmballoon_enable_stats(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	/* format capabilities info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		   VMW_BALLOON_CAPABILITIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	seq_printf(f, "%-22s: %16s\n", "is resetting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		   b->reset_required ? "y" : "n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	/* format size info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		if (vmballoon_cmd_names[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			   vmballoon_cmd_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		seq_printf(f, "%-22s: %16llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			   vmballoon_stat_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			   atomic64_read(&b->stats->general_stat[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			seq_printf(f, "%-18s(%s): %16llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 				   vmballoon_stat_page_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				   vmballoon_page_size_names[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				   atomic64_read(&b->stats->page_stat[i][j]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
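/*
 * vmballoon_debugfs_init() - create the read-only "vmmemctl" debugfs entry.
 *
 * The entry is created at the debugfs root; assuming debugfs is mounted at the
 * usual /sys/kernel/debug location, the statistics can then be read with e.g.
 * "cat /sys/kernel/debug/vmmemctl". The first read enables stat collection.
 */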
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static void __init vmballoon_debugfs_init(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 					   &vmballoon_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
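/*
 * vmballoon_debugfs_exit() - remove the debugfs entry, disable statistics
 * collection and free the statistics buffer (if it was ever allocated).
 */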
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	static_key_disable(&balloon_stat_enabled.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	debugfs_remove(b->dbg_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	kfree(b->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	b->stats = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
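/* Stub implementations used when CONFIG_DEBUG_FS is not enabled. */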
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static inline void vmballoon_debugfs_init(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static inline void vmballoon_debugfs_exit(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) #endif	/* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) #ifdef CONFIG_BALLOON_COMPACTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
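/*
 * Pseudo filesystem used only to allocate the anonymous inode that backs
 * balloon page migration (see vmballoon_compaction_init() below).
 */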
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static int vmballoon_init_fs_context(struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static struct file_system_type vmballoon_fs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	.name           	= "balloon-vmware",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	.init_fs_context	= vmballoon_init_fs_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	.kill_sb        	= kill_anon_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static struct vfsmount *vmballoon_mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  * vmballoon_migratepage() - migrates a balloon page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * @b_dev_info: balloon device information descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * @newpage: the page to which @page should be migrated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * @page: a ballooned page that should be migrated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * @mode: migration mode, ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  * This function is largely open-coded, but that is in line with the interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  * that balloon_compaction provides.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  * Return: zero on success, -EAGAIN when migration cannot be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  *	   momentarily, and -EBUSY if migration failed and should be retried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  *	   with that specific page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 				 struct page *newpage, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 				 enum migrate_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	unsigned long status, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	struct vmballoon *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	 * If the semaphore is taken, there is ongoing configuration change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	 * (i.e., balloon reset), so try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	if (!down_read_trylock(&b->conf_sem))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	spin_lock(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	 * We must start by deflating and not inflating, as otherwise the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	 * hypervisor may tell us that it has enough memory and the new page is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	 * not needed. Since the old page is isolated, we cannot use the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	 * interface to unlock it, as the LRU field is used for isolation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	 * Instead, we use the native interface directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	vmballoon_add_page(b, 0, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 				   VMW_BALLOON_DEFLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	if (status == VMW_BALLOON_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		status = vmballoon_status_page(b, 0, &page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	 * If a failure happened, let the migration mechanism know that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	 * should not retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (status != VMW_BALLOON_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		spin_unlock(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	 * The page is isolated, so it is safe to delete it without holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	 * @pages_lock. We keep holding @comm_lock since we will need it again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	 * shortly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	balloon_page_delete(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/* Inflate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	vmballoon_add_page(b, 0, newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 				   VMW_BALLOON_INFLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (status == VMW_BALLOON_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		status = vmballoon_status_page(b, 0, &newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	spin_unlock(&b->comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	if (status != VMW_BALLOON_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		 * A failure happened. While we can deflate the page we just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		 * inflated, this deflation can also encounter an error. Instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		 * we will decrease the size of the balloon to reflect the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		 * change and report failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		atomic64_dec(&b->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		 * Success. Take a reference for the page, and we will add it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		 * the list after acquiring the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		get_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		ret = MIGRATEPAGE_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	/* Update the balloon list under the @pages_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 * On inflation success, we already took a reference for @newpage. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	 * succeeded, just insert it into the list and update the statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	 * under the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (ret == MIGRATEPAGE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		balloon_page_insert(&b->b_dev_info, newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		__count_vm_event(BALLOON_MIGRATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	 * We deflated successfully, so regardless of the inflation success, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	 * need to reduce the number of isolated_pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	b->b_dev_info.isolated_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	up_read(&b->conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * vmballoon_compaction_deinit() - removes compaction related data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) static void vmballoon_compaction_deinit(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (!IS_ERR(b->b_dev_info.inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		iput(b->b_dev_info.inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	b->b_dev_info.inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	kern_unmount(vmballoon_mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	vmballoon_mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  * vmballoon_compaction_init() - initializes compaction for the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)  * @b: pointer to the balloon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  * If a failure occurs during the initialization, this function does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)  * perform cleanup. The caller must call vmballoon_compaction_deinit() in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * Return: zero on success or error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static __init int vmballoon_compaction_init(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	vmballoon_mnt = kern_mount(&vmballoon_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	if (IS_ERR(vmballoon_mnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		return PTR_ERR(vmballoon_mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	b->b_dev_info.migratepage = vmballoon_migratepage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (IS_ERR(b->b_dev_info.inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		return PTR_ERR(b->b_dev_info.inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) #else /* CONFIG_BALLOON_COMPACTION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
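/* Stub implementations used when CONFIG_BALLOON_COMPACTION is not enabled. */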
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static void vmballoon_compaction_deinit(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) static int vmballoon_compaction_init(struct vmballoon *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) #endif /* CONFIG_BALLOON_COMPACTION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
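/**
 * vmballoon_init() - module initialization.
 *
 * Bails out when not running on VMware's hypervisor. Otherwise registers the
 * shrinker, sets up compaction, initializes locks and state, queues the first
 * run of the periodic worker and finally creates the debugfs entry.
 *
 * Return: zero on success or a negative error code.
 */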
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int __init vmballoon_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	 * Check if we are running on VMware's hypervisor and bail out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	 * if we are not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (x86_hyper_type != X86_HYPER_VMWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	error = vmballoon_register_shrinker(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	 * Initialization of compaction must be done after the call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 * balloon_devinfo_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	balloon_devinfo_init(&balloon.b_dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	error = vmballoon_compaction_init(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	INIT_LIST_HEAD(&balloon.huge_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	spin_lock_init(&balloon.comm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	init_rwsem(&balloon.conf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	balloon.batch_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	balloon.page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	balloon.reset_required = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	vmballoon_debugfs_init(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	vmballoon_unregister_shrinker(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	vmballoon_compaction_deinit(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  * Using late_initcall() instead of module_init() allows the balloon to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  * VMCI is probed only after the balloon is initialized. If the balloon is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)  * as a module, late_initcall() is equivalent to module_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) late_initcall(vmballoon_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
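/**
 * vmballoon_exit() - module teardown.
 *
 * Unregisters the shrinker, cleans up the VMCI doorbell, cancels the periodic
 * worker, removes the debugfs entry, resets the connection with the monitor
 * and releases all ballooned pages before deinitializing compaction.
 */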
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) static void __exit vmballoon_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	vmballoon_unregister_shrinker(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	vmballoon_vmci_cleanup(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	cancel_delayed_work_sync(&balloon.dwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	vmballoon_debugfs_exit(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	 * Deallocate all reserved memory, and reset connection with monitor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	 * Reset connection before deallocating memory to avoid potential for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	 * additional spurious resets from guest touching deallocated pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	vmballoon_send_start(&balloon, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	vmballoon_pop(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	/* Only once the balloon has been popped can compaction be deinitialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	vmballoon_compaction_deinit(&balloon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) module_exit(vmballoon_exit);