Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * jump label support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2011 Peter Zijlstra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/static_key.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/jump_label_ratelimit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) /* mutex to protect coming/going of the jump_label table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) static DEFINE_MUTEX(jump_label_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
/* Acquire the mutex serializing all jump label table updates. */
void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
/* Release the jump label table mutex taken by jump_label_lock(). */
void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
/*
 * sort() comparison callback for the jump entry table.
 *
 * Entries are ordered primarily by their static_key, so all entries of a
 * key form one contiguous run.  Within a run they are ordered by code
 * address, which allows the batching code to bsearch the vector.
 */
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *lhs = a;
	const struct jump_entry *rhs = b;

	/* Primary ordering: group entries by key. */
	if (jump_entry_key(lhs) != jump_entry_key(rhs))
		return jump_entry_key(lhs) < jump_entry_key(rhs) ? -1 : 1;

	/* Secondary ordering: sort each key's run by code address. */
	if (jump_entry_code(lhs) != jump_entry_code(rhs))
		return jump_entry_code(lhs) < jump_entry_code(rhs) ? -1 : 1;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
/*
 * sort() swap callback, installed only for
 * CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE (see jump_label_sort_entries()).
 * On such architectures code/target/key hold offsets relative to the
 * field's own address, so when an entry moves by 'delta' bytes every
 * field must be rebased by delta to keep referring to the same absolute
 * address.  @size is required by the sort() callback signature but
 * unused here.
 */
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	/* b's fields move to a's (lower-by-delta) location: subtract delta. */
	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	/* a's saved fields move to b's location: add delta back. */
	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	void *swapfn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		swapfn = jump_label_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	size = (((unsigned long)stop - (unsigned long)start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 					/ sizeof(struct jump_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) static void jump_label_update(struct static_key *key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, and that is problematic for
 * some kernel headers such as kernel.h and others. Since static_key_count() is
 * not used in the branch statements as it is for the !CONFIG_JUMP_LABEL case,
 * it is OK to have it be a function here. Similarly for 'static_key_enable()'
 * and 'static_key_disable()', which require bug.h. This should allow
 * jump_label.h to be included from most/all places for CONFIG_JUMP_LABEL.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int static_key_count(struct static_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	 * -1 means the first static_key_slow_inc() is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	 *  static_key_enabled() must return true, so return 1 here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	int n = atomic_read(&key->enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	return n >= 0 ? n : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) EXPORT_SYMBOL_GPL(static_key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
/*
 * Increment @key's enable count; caller holds cpus_read_lock().  The
 * 0 -> 1 transition patches the affected code sites via
 * jump_label_update(); every later increment only bumps the counter via
 * the lock-free cmpxchg fast path below.
 */
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	/* Fast path failed: count is 0 or negative, take the mutex. */
	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		/* -1 marks "first enable in progress"; see comment above. */
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		/* Someone else finished the 0 -> 1 transition meanwhile. */
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
/*
 * Increment @key's enable count, patching code on the 0 -> 1 transition.
 * Takes the CPU hotplug read lock around the cpuslocked variant.
 */
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
/*
 * Force @key to enabled (count == 1); caller holds cpus_read_lock().
 * Not reference counted: enabling an already-enabled key is a no-op,
 * and a count other than 0 or 1 triggers the WARN below.
 */
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	/* Re-check under the mutex; someone may have enabled it meanwhile. */
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
/*
 * Force @key to enabled, taking the CPU hotplug read lock around the
 * cpuslocked variant.
 */
void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
/*
 * Force @key to disabled (count == 0); caller holds cpus_read_lock().
 * Only valid for counts of 0 or 1: a reference-counted key (count > 1)
 * triggers the WARN below and is left unchanged.
 */
void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	/* Only the task that wins the 1 -> 0 transition patches the code. */
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
/*
 * Force @key to disabled, taking the CPU hotplug read lock around the
 * cpuslocked variant.
 */
void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
/*
 * Fast-path decrement: drop @key's count unless doing so would reach
 * zero.  Returns true when the decrement was performed (no code patching
 * needed); returns false when the count was 1, in which case the caller
 * must take the slow path to patch the code sites.
 */
static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	/* Adds -1 unless the old value was 1; returns the old value. */
	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
/*
 * Decrement @key's enable count; caller holds cpus_read_lock().  If the
 * count drops to zero the code sites are patched back under the jump
 * label mutex.
 */
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/* Fast path: count stays above zero, nothing to patch. */
	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
/*
 * Decrement @key's enable count, taking the CPU hotplug read lock around
 * the cpuslocked variant.
 */
static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/*
 * Delayed-work callback for rate-limited keys: performs the decrement
 * that __static_key_slow_dec_deferred() postponed.
 */
void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
/*
 * Decrement @key's enable count, patching code on the 1 -> 0 transition.
 * Counterpart of static_key_slow_inc().
 */
void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
/*
 * As static_key_slow_dec(), for callers that already hold
 * cpus_read_lock().
 */
void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
/*
 * Rate-limited decrement: try the fast path, and when the count would
 * hit zero, defer the expensive code-patching decrement to @work after
 * @timeout instead of doing it synchronously.
 */
void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
/*
 * Wait for any pending deferred decrement of @key (queued via
 * __static_key_slow_dec_deferred()) to complete.
 */
void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
/*
 * Configure @key for rate-limited disabling: decrements that would patch
 * code are deferred by @rl jiffies via jump_label_update_timeout().
 */
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static int addr_conflict(struct jump_entry *entry, void *start, void *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	if (jump_entry_code(entry) <= (unsigned long)end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int __jump_label_text_reserved(struct jump_entry *iter_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 		struct jump_entry *iter_stop, void *start, void *end, bool init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	struct jump_entry *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	iter = iter_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	while (iter < iter_stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		if (init || !jump_entry_is_init(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 			if (addr_conflict(iter, start, end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		iter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 *
 * This weak default simply performs the regular (live) transform.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
/*
 * Return @key's jump entry table, masking off the low type bits stored
 * in the pointer.  Invalid for module-linked keys, hence the WARN.
 */
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
/* True if @key's initial branch direction bit (JUMP_TYPE_TRUE) is set. */
static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
/* True if @key points to a linked list of modules rather than a table. */
static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
/* Clear the "module linked list" bit in @key's pointer word. */
static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
/* Set the "module linked list" bit in @key's pointer word. */
static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
/*
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
/*
 * Point @key at a new jump entry table.  Writing key->entries (a union
 * member sharing storage with key->type) clobbers the two low type bits,
 * so they are saved first and OR'd back afterwards.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	/* The table must be aligned so the low bits are free for flags. */
	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 
/*
 * Compute the desired state (JMP or NOP) for @entry: the key's enabled
 * state XOR'd with the entry's branch bias.
 */
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
/*
 * Decide whether @entry may be patched: init-text entries are only
 * patchable while init sections are still live (@init), and the code
 * address must be valid kernel text.
 */
static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
#ifndef HAVE_JUMP_LABEL_BATCH
/*
 * Patch every jump entry belonging to @key in [entry, stop).  The entry
 * vector is sorted by key, so iteration stops at the first entry of a
 * different key.  One-at-a-time variant.
 */
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
/*
 * Batching variant: entries are queued and applied in bulk, amortizing
 * the cost of the arch-level text-patching synchronization.
 */
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			/* Requeueing into an empty queue must succeed. */
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	/* Flush whatever remains queued. */
	arch_jump_label_transform_apply();
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
/*
 * Boot-time initialization: sort the core kernel's jump entry table,
 * rewrite NOPs, flag entries in init text and link each static_key to
 * its run of entries.  Idempotent via static_key_initialized.
 */
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	/* same lock order as jump_label_module_notify() */
	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		/*
		 * Remember which entries live in init sections so that
		 * later updates can treat them specially (see
		 * jump_label_can_update()).
		 */
		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		/* first entry of a new key: link key -> sorted entry run */
		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	struct static_key *key = jump_entry_key(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	bool type = static_key_type(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	bool branch = jump_entry_is_branch(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	/* See the comment in linux/jump_label.h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	return type ^ branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 
/*
 * Per-module node in a static_key's singly-linked list of entry ranges.
 * One node exists for each module (and possibly the core kernel) that
 * contains jump entries for the key.
 */
struct static_key_mod {
	struct static_key_mod *next;	/* next user of this key */
	struct jump_entry *entries;	/* first entry for this key in mod */
	struct module *mod;		/* NULL for core-kernel (vmlinux) entries */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
/*
 * Return the static_key_mod list head stored in key->type: the low
 * JUMP_TYPE_MASK bits carry flag state, the remaining bits are the
 * pointer.  Only meaningful once the key is marked linked.
 */
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	/* mod must be aligned, or the flag bits below would be corrupted */
	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	/* save the flag bits ... */
	type = key->type & JUMP_TYPE_MASK;
	/* ... replace the whole union with the new pointer ... */
	key->next = mod;
	/* ... and restore the flags on top of it */
	key->type |= type;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
/*
 * Check whether the text range [start, end] overlaps any jump entry of
 * the module containing @start.  A reference is taken on the module so
 * its entry table cannot be freed while it is scanned.  Returns 0 when
 * the range is not in module text (or the module could not be pinned).
 */
static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	/* the whole range is expected to sit inside a single module */
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 
/*
 * Propagate @key's state change to every module using it, walking the
 * static_key_mod list built by jump_label_add_module().
 */
static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		/* mod->mod == NULL means the entries are built-in (vmlinux) */
		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		/*
		 * A COMING module may still have live init-section
		 * entries, hence the init argument.
		 */
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)  * @mod: module to patch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)  * Allow for run-time selection of the optimal nops. Before the module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)  * loads patch these with arch_get_jump_label_nop(), which is specified by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)  * the arch specific jump label code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) void jump_label_apply_nops(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	struct jump_entry *iter_start = mod->jump_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	struct jump_entry *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	/* if the module doesn't have jump label entries, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	if (iter_start == iter_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	for (iter = iter_start; iter < iter_stop; iter++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		/* Only write NOPs for arch_branch_static(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
/*
 * Hook a COMING module's jump entries up to their static keys.
 *
 * Keys defined inside @mod are attached directly via
 * static_key_set_entries().  For keys defined elsewhere, a
 * static_key_mod node is prepended to the key's list; the first time a
 * key gains an external user, an extra node is allocated to also record
 * the key owner's own entries.  Returns 0 on success or -ENOMEM (the
 * caller unwinds with jump_label_del_module()).
 */
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	/* sort so all entries of one key become contiguous */
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* flag entries in the module's init text */
		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;	/* same key as the previous entry */

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			/* key is owned by this module: link entries directly */
			static_key_set_entries(key, iter);
			continue;
		}
		/* key lives outside @mod: track the usage in a list node */
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			/*
			 * First external user: convert the key's direct
			 * entry pointer into a one-node list describing
			 * the key owner's own entries.
			 */
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 
/*
 * Undo jump_label_add_module(): unlink @mod's static_key_mod nodes from
 * every external key the module used.  Keys owned by @mod itself are
 * skipped — their storage disappears together with the module.
 */
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;	/* same key as the previous entry */

		key = jump_entry_key(iter);

		/* key owned by this module: nothing to unlink */
		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		/* find the list node belonging to @mod */
		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		/*
		 * Unlinking the head must go through static_key_set_mod()
		 * to preserve the flag bits stored in key->type.
		 */
		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
/*
 * Module notifier: attach/detach a module's jump entries as it comes
 * and goes.  Lock order matches jump_label_init().
 */
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			/* roll back whatever was linked before the failure */
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
/* Notifier block hooked into the module loader. */
static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 
/* Register the module notifier at early-initcall time. */
static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) #endif /* CONFIG_MODULES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /***
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)  * jump_label_text_reserved - check if addr range is reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)  * @start: start text addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)  * @end: end text addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)  * checks if the text addr located between @start and @end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)  * overlaps with any of the jump label patch addresses. Code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)  * that wants to modify kernel text should first verify that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)  * it does not overlap with any of the jump label addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)  * Caller must hold jump_label_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)  * returns 1 if there is an overlap, 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)  */
int jump_label_text_reserved(void *start, void *end)
{
	/* before SYSTEM_RUNNING, init-section entries still count */
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	/* not reserved by the core kernel: check module entries too */
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
/*
 * Patch all code sites attached to @key so they match its current
 * state.
 */
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	/* key with module users: walk the static_key_mod list instead */
	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	/* if the key itself lives in a module, its entries end there */
	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #ifdef CONFIG_STATIC_KEYS_SELFTEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static DEFINE_STATIC_KEY_TRUE(sk_true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static DEFINE_STATIC_KEY_FALSE(sk_false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
/*
 * Boot-time self-test: toggle a default-true and a default-false static
 * key and check that static_key_enabled() and the patched branches stay
 * consistent.  Two iterations verify state survives a full round trip.
 */
static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		/* declared/default state */
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		/* flip both keys */
		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		/* flip them back for the next iteration */
		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */