Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards; the file shown below is mm/page_counter.c.

// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}
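
/*
 * For illustration: suppose a child counter has min == 100 pages and
 * its usage rises from 80 to 120 between two calls of the function
 * above. The earlier call left min_usage == min(80, 100) == 80; the
 * later one computes protected == min(120, 100) == 100, so delta == 20
 * and the parent's children_min_usage grows by 20 pages. Each level
 * thereby keeps an aggregate of its children's protected usage up to
 * date using only atomic operations, never a lock.
 */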

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	propagate_protected_usage(counter, new);
	/* More uncharges than charges? */
	WARN_ON_ONCE(new < 0);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}
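
/*
 * A minimal usage sketch for the force-charge path above, assuming a
 * hypothetical oom_policy() helper; page_counter_charge() itself never
 * fails and never consults c->max, so any limit enforcement is the
 * caller's job (the memory controller layers its own policy on top).
 */
#if 0	/* illustration only */
static void example_force_charge(struct page_counter *pc,
				 unsigned long nr_pages)
{
	/* Unconditionally account the pages, walking up to the root. */
	page_counter_charge(pc, nr_pages);

	/* A caller that cares about the limit must check it itself. */
	if (page_counter_read(pc) > pc->max)
		oom_policy();	/* hypothetical response to the overrun */
}
#endif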

/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points to the first counter that hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS.  If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit.  When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			propagate_protected_usage(c, new);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt which is only used
			 * to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
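
/*
 * A minimal usage sketch for the try-charge path, assuming a
 * hypothetical try_to_reclaim() helper and error policy; the real
 * memory controller wraps this pattern in its charge path together
 * with reclaim retries and OOM handling.
 */
#if 0	/* illustration only */
static int example_try_charge(struct page_counter *pc,
			      unsigned long nr_pages)
{
	struct page_counter *fail;
	int retries = 5;

	while (!page_counter_try_charge(pc, nr_pages, &fail)) {
		/* @fail is the counter (this one or an ancestor) at its limit. */
		if (!retries-- || !try_to_reclaim(fail, nr_pages))
			return -ENOMEM;	/* hypothetical error policy */
	}
	return 0;
}
#endif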

/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = atomic_long_read(&counter->usage);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (atomic_long_read(&counter->usage) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
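
/*
 * For illustration, the race the loop above tolerates: with usage at
 * 10 pages, a writer lowering the limit to 15 may race with a charge
 * of 10 more pages. Either the charger's add_return sees max == 15
 * already and backs out, or the writer's re-read sees usage == 20 > 10,
 * restores the old limit and retries (now failing with -EBUSY, since
 * 20 > 15). In both orderings, usage and max stay coherent.
 */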

/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
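
/*
 * A minimal end-to-end sketch of the API in this file, assuming a
 * standalone pair of counters; all functions and fields used are
 * declared in <linux/page_counter.h>. A real user such as the memory
 * cgroup controller embeds the counters in its own state and adds
 * reclaim and OOM policy around the failure paths.
 */
#if 0	/* illustration only */
static int example(void)
{
	struct page_counter parent, child;
	struct page_counter *fail;
	unsigned long limit;

	page_counter_init(&parent, NULL);
	page_counter_init(&child, &parent);

	/* Parse a human-readable limit, e.g. "512M", into pages. */
	if (page_counter_memparse("512M", "max", &limit))
		return -EINVAL;
	if (page_counter_set_max(&child, limit))
		return -EBUSY;	/* usage already above the new limit */

	/* Charge 32 pages against both child and parent, or fail cleanly. */
	if (!page_counter_try_charge(&child, 32, &fail))
		return -ENOMEM;

	page_counter_uncharge(&child, 32);
	return 0;
}
#endif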