Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

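mm/vmpressure.c: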
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

#include <trace/hooks/mm.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as a
 * rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Using small window
 * sizes can cause a lot of false positives, but too big a window size will
 * delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise fast as the
 * reclaimer will try to scan LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * critical level reached when the scanning depth is ~10% of the lru size
 * (vmscan scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
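
/*
 * Concretely: ilog2(100 / 10) = ilog2(10) = 3, so the critical threshold is
 * prio <= 3. Since vmscan scans 'lru_size >> prio' pages per pass, prio == 3
 * corresponds to scanning lru_size >> 3, i.e. one eighth (12.5%) of the LRU.
 */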

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

enum vmpressure_modes {
	VMPRESSURE_NO_PASSTHROUGH = 0,
	VMPRESSURE_HIERARCHY,
	VMPRESSURE_LOCAL,
	VMPRESSURE_NUM_MODES,
};
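
/*
 * Mode semantics (see the filtering in vmpressure_event() below):
 * VMPRESSURE_NO_PASSTHROUGH ("default") listeners are skipped once a cgroup
 * closer to the pressure origin has already been signalled for this event;
 * VMPRESSURE_HIERARCHY listeners are always notified, including for pressure
 * propagated up from descendants; VMPRESSURE_LOCAL listeners are notified
 * only for pressure originating in their own cgroup, never as an ancestor.
 */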

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
	[VMPRESSURE_NO_PASSTHROUGH] = "default",
	[VMPRESSURE_HIERARCHY] = "hierarchy",
	[VMPRESSURE_LOCAL] = "local",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure = 0;

	/*
	 * reclaimed can be greater than scanned for things such as reclaimed
	 * slab pages. shrink_node() just adds reclaimed pages without a
	 * related increment to scanned pages.
	 */
	if (reclaimed >= scanned)
		goto out;
	/*
	 * We calculate the ratio (in percents) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * time is in VM reclaimer's "ticks", i.e. number of pages
	 * scanned. This makes it possible to set desired reaction time
	 * and serves as a ratelimit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

out:
	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}
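
/*
 * A worked example of the calculation above: with scanned = 512 and
 * reclaimed = 128, scale = 640 and
 *
 *	pressure = 640 - (128 * 640 / 512) = 480
 *	pressure = 480 * 100 / 640         = 75
 *
 * which is just 100 * (1 - reclaimed/scanned). 75 is >= vmpressure_level_med
 * (60) but < vmpressure_level_critical (95), so this window reports
 * VMPRESSURE_MEDIUM.
 */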

struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	enum vmpressure_modes mode;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     const enum vmpressure_levels level,
			     bool ancestor, bool signalled)
{
	struct vmpressure_event *ev;
	bool ret = false;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ancestor && ev->mode == VMPRESSURE_LOCAL)
			continue;
		if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
			continue;
		if (level < ev->level)
			continue;
		eventfd_signal(ev->efd, 1);
		ret = true;
	}
	mutex_unlock(&vmpr->events_lock);

	return ret;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;
	bool ancestor = false;
	bool signalled = false;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be zero
	 * here. No need for any locks here since we don't care if
	 * vmpr->reclaimed is in sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level, ancestor, signalled))
			signalled = true;
		ancestor = true;
	} while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @tree:	legacy subtree mode
 * @scanned:	number of pages scanned
 * @reclaimed:	number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr;
	bool bypass = false;

	if (mem_cgroup_disabled())
		return;

	vmpr = memcg_to_vmpressure(memcg);

	trace_android_vh_vmpressure(memcg, &bypass);
	if (unlikely(bypass))
		return;

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. But it does not mean that we should
	 * report the critical pressure, yet. If the scanning priority
	 * (scanning depth) goes too high (deep), we will be notified
	 * through vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || mem_cgroup_is_root(memcg))
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis keep the pressure state
			 * asserted for a second in which subsequent
			 * pressure events can occur.
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}
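
/*
 * For orientation, a sketch of how the reclaim path is expected to drive
 * this (the actual call sites live in mm/vmscan.c and may differ in detail):
 *
 *	// per-memcg pass: record efficiency for this memcg only
 *	vmpressure(sc->gfp_mask, memcg, false,
 *		   sc->nr_scanned - scanned,
 *		   sc->nr_reclaimed - reclaimed);
 *
 *	// whole-tree pass: account the subtree and notify userspace
 *	vmpressure(sc->gfp_mask, target_memcg, true,
 *		   sc->nr_scanned - nr_scanned,
 *		   sc->nr_reclaimed - nr_reclaimed);
 *
 * i.e. each call passes the scanned/reclaimed deltas accumulated since the
 * previous snapshot, so every window reflects only fresh reclaim activity.
 */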

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @prio:	reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more info
	 * see the comment for the vmpressure_level_critical_prio variable
	 * above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold, so update the vmpressure
	 * information before the reclaimer dives into a long round of
	 * vmscan shrinking. Passing scanned = vmpressure_win, reclaimed = 0
	 * to vmpressure() basically means that we signal the 'critical'
	 * level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
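
/*
 * Likewise, the direct-reclaim loop in mm/vmscan.c is expected to call this
 * once per priority step, roughly as
 *
 *	vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority);
 *
 * so the 'critical' level is asserted as soon as sc->priority drops to
 * vmpressure_level_critical_prio or below.
 */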

#define MAX_VMPRESSURE_ARGS_LEN	(strlen("critical") + strlen("hierarchy") + 2)
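/* i.e. long enough for the longest valid spec, "critical,hierarchy". */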

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg:	memcg that is interested in vmpressure notifications
 * @eventfd:	eventfd context to link notifications with
 * @args:	event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical") and an optional mode (one of vmpressure_str_modes, i.e.
 * "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
 * not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
	enum vmpressure_levels level;
	char *spec, *spec_orig;
	char *token;
	int ret = 0;

	spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Find required level */
	token = strsep(&spec, ",");
	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
	if (ret < 0)
		goto out;
	level = ret;

	/* Find optional mode */
	token = strsep(&spec, ",");
	if (token) {
		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
		if (ret < 0)
			goto out;
		mode = ret;
	}

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto out;
	}

	ev->efd = eventfd;
	ev->level = level;
	ev->mode = mode;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);
	ret = 0;
out:
	kfree(spec_orig);
	return ret;
}
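
/*
 * From userspace this is reached through the cgroup-v1 memory controller's
 * event interface; a minimal sketch (the cgroup path is hypothetical):
 *
 *	int efd = eventfd(0, 0);
 *	int lfd = open("/sys/fs/cgroup/memory/foo/memory.pressure_level",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *	dprintf(cfd, "%d %d low,hierarchy", efd, lfd);	// becomes @args
 *
 * after which each notification is consumed by read()ing 8 bytes from efd.
 */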

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg:	memcg handle
 * @eventfd:	eventfd context that was used to link vmpressure with @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr:	Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr:	Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before eventfd infrastructure
	 * goes away.
	 */
	flush_work(&vmpr->work);
}