Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  Kernel Probes (KProbes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *  kernel/kprobes.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) IBM Corporation, 2002, 2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *		Probes initial implementation (includes suggestions from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *		Rusty Russell).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *		hlists and exceptions notifier as suggested by Andi Kleen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *		interface to access function arguments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *		exceptions notifier to be first on the priority list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *		<prasanna@in.ibm.com> added function-return probes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/stddef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/moduleloader.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/kallsyms.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/kdebug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/jump_label.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/static_call.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define KPROBE_HASH_BITS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) static int kprobes_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) /* kprobe_table can be accessed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  * - Normal hlist traversal and RCU add/del under kprobe_mutex is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * Or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  * - RCU hlist traversal under disabling preempt (breakpoint handlers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) /* NOTE: change this value only with kprobe_mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) static bool kprobes_all_disarmed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) /* This protects kprobe_table and optimizing_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) static DEFINE_MUTEX(kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	raw_spinlock_t lock ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) } kretprobe_table_locks[KPROBE_TABLE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 					unsigned int __unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	return &(kretprobe_table_locks[hash].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) /* Blacklist -- list of struct kprobe_blacklist_entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static LIST_HEAD(kprobe_blacklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  * kprobe->ainsn.insn points to the copy of the instruction to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * single-stepped. x86_64, POWER4 and above have no-exec support and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * stepping on the instruction on a vmalloced/kmalloced/data page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * is a recipe for disaster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) struct kprobe_insn_page {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	kprobe_opcode_t *insns;		/* Page of instruction slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	struct kprobe_insn_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	int nused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	int ngarbage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	char slot_used[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) #define KPROBE_INSN_PAGE_SIZE(slots)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	(offsetof(struct kprobe_insn_page, slot_used) +	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	 (sizeof(char) * (slots)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) static int slots_per_page(struct kprobe_insn_cache *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) enum kprobe_slot_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	SLOT_CLEAN = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	SLOT_DIRTY = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	SLOT_USED = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) void __weak *alloc_insn_page(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	return module_alloc(PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) void __weak free_insn_page(void *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	module_memfree(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) struct kprobe_insn_cache kprobe_insn_slots = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	.alloc = alloc_insn_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	.free = free_insn_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	.sym = KPROBE_INSN_PAGE_SYM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	.insn_size = MAX_INSN_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	.nr_garbage = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) static int collect_garbage_slots(struct kprobe_insn_cache *c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136)  * __get_insn_slot() - Find a slot on an executable page for an instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137)  * We allocate an executable page if there's no room on existing ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	struct kprobe_insn_page *kip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	kprobe_opcode_t *slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	/* Since the slot array is not protected by rcu, we need a mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	mutex_lock(&c->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146)  retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	list_for_each_entry_rcu(kip, &c->pages, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 		if (kip->nused < slots_per_page(c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 			int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 			for (i = 0; i < slots_per_page(c); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 				if (kip->slot_used[i] == SLOT_CLEAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 					kip->slot_used[i] = SLOT_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 					kip->nused++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 					slot = kip->insns + (i * c->insn_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 					rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 			/* kip->nused is broken. Fix it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 			kip->nused = slots_per_page(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 			WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	/* If there are any garbage slots, collect it and try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	/* All out of space.  Need to allocate a new page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	if (!kip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	 * Use module_alloc so this page is within +/- 2GB of where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	 * kernel image and loaded module images reside. This is required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	 * so x86_64 can correctly handle the %rip-relative fixups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	kip->insns = c->alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	if (!kip->insns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		kfree(kip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	INIT_LIST_HEAD(&kip->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	kip->slot_used[0] = SLOT_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	kip->nused = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	kip->ngarbage = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	kip->cache = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	list_add_rcu(&kip->list, &c->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	slot = kip->insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	/* Record the perf ksymbol register event after adding the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 			   PAGE_SIZE, false, c->sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	mutex_unlock(&c->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	return slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) /* Return 1 if all garbages are collected, otherwise 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	kip->slot_used[idx] = SLOT_CLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	kip->nused--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	if (kip->nused == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		 * Page is no longer in use.  Free it unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		 * it's the last one.  We keep the last one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		 * so as not to have to set it up again the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		 * next time somebody inserts a probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		if (!list_is_singular(&kip->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 			 * Record perf ksymbol unregister event before removing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 			 * the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 					   (unsigned long)kip->insns, PAGE_SIZE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 					   kip->cache->sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 			list_del_rcu(&kip->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 			synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 			kip->cache->free(kip->insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 			kfree(kip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) static int collect_garbage_slots(struct kprobe_insn_cache *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	struct kprobe_insn_page *kip, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	/* Ensure no-one is interrupted on the garbages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	list_for_each_entry_safe(kip, next, &c->pages, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		if (kip->ngarbage == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		kip->ngarbage = 0;	/* we will collect all garbages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		for (i = 0; i < slots_per_page(c); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	c->nr_garbage = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) void __free_insn_slot(struct kprobe_insn_cache *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		      kprobe_opcode_t *slot, int dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	struct kprobe_insn_page *kip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	long idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	mutex_lock(&c->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	list_for_each_entry_rcu(kip, &c->pages, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		idx = ((long)slot - (long)kip->insns) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 			(c->insn_size * sizeof(kprobe_opcode_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		if (idx >= 0 && idx < slots_per_page(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	/* Could not find this slot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	kip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	/* Mark and sweep: this may sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	if (kip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		/* Check double free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		WARN_ON(kip->slot_used[idx] != SLOT_USED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 		if (dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 			kip->slot_used[idx] = SLOT_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 			kip->ngarbage++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 			if (++c->nr_garbage > slots_per_page(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 				collect_garbage_slots(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 			collect_one_slot(kip, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	mutex_unlock(&c->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  * Check given address is on the page of kprobe instruction slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * This will be used for checking whether the address on a stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  * is on a text area or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	struct kprobe_insn_page *kip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	list_for_each_entry_rcu(kip, &c->pages, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		if (addr >= (unsigned long)kip->insns &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 			ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 			     unsigned long *value, char *type, char *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	struct kprobe_insn_page *kip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	int ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	list_for_each_entry_rcu(kip, &c->pages, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		if ((*symnum)--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		strlcpy(sym, c->sym, KSYM_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		*type = 't';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		*value = (unsigned long)kip->insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) #ifdef CONFIG_OPTPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) /* For optimized_kprobe buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) struct kprobe_insn_cache kprobe_optinsn_slots = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	.alloc = alloc_insn_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	.free = free_insn_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	.sym = KPROBE_OPTINSN_PAGE_SYM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	/* .insn_size is initialized later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	.nr_garbage = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) /* We have preemption disabled.. so it is safe to use __ versions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) static inline void set_kprobe_instance(struct kprobe *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	__this_cpu_write(kprobe_instance, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) static inline void reset_kprobe_instance(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	__this_cpu_write(kprobe_instance, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  * This routine is called either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  * 	- under the kprobe_mutex - during kprobe_[un]register()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  * 				OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) struct kprobe *get_kprobe(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	hlist_for_each_entry_rcu(p, head, hlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 				 lockdep_is_held(&kprobe_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		if (p->addr == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) NOKPROBE_SYMBOL(get_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) /* Return true if the kprobe is an aggregator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) static inline int kprobe_aggrprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	return p->pre_handler == aggr_pre_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) /* Return true(!0) if the kprobe is unused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) static inline int kprobe_unused(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	       list_empty(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396)  * Keep all fields in the kprobe consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) #ifdef CONFIG_OPTPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) /* NOTE: change this value only with kprobe_mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) static bool kprobes_allow_optimization;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  * Call all pre_handler on the list, but ignores its return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  * This must be called from arch-dep optimized caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	list_for_each_entry_rcu(kp, &p->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 			set_kprobe_instance(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 			kp->pre_handler(kp, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		reset_kprobe_instance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) NOKPROBE_SYMBOL(opt_pre_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) /* Free optimized instructions and optimized_kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) static void free_aggr_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	arch_remove_optimized_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	arch_remove_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	kfree(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) /* Return true(!0) if the kprobe is ready for optimization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) static inline int kprobe_optready(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (kprobe_aggrprobe(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		return arch_prepared_optinsn(&op->optinsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) static inline int kprobe_disarmed(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	if (!kprobe_aggrprobe(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		return kprobe_disabled(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	return kprobe_disabled(p) && list_empty(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) /* Return true(!0) if the probe is queued on (un)optimizing lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) static int kprobe_queued(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (kprobe_aggrprobe(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		if (!list_empty(&op->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478)  * Return an optimized kprobe whose optimizing code replaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479)  * instructions including addr (exclude breakpoint).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static struct kprobe *get_optimized_kprobe(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	struct kprobe *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* Don't check i == 0, since that is a breakpoint case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		p = get_kprobe((void *)(addr - i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (p && kprobe_optready(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		if (arch_within_optimized_kprobe(op, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) /* Optimization staging list, protected by kprobe_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) static LIST_HEAD(optimizing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) static LIST_HEAD(unoptimizing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) static LIST_HEAD(freeing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) static void kprobe_optimizer(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) #define OPTIMIZE_DELAY 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510)  * Optimize (replace a breakpoint with a jump) kprobes listed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511)  * optimizing_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) static void do_optimize_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	lockdep_assert_held(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	 * The optimization/unoptimization refers online_cpus via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	 * stop_machine() and cpu-hotplug modifies online_cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	 * And same time, text_mutex will be held in cpu-hotplug and here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	 * This combination can cause a deadlock (cpu-hotplug try to lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	 * text_mutex but stop_machine can not be done because online_cpus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	 * has been changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	 * To avoid this deadlock, caller must have locked cpu hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	 * for preventing cpu-hotplug outside of text_mutex locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	/* Optimization never be done when disarmed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	    list_empty(&optimizing_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	arch_optimize_kprobes(&optimizing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537)  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538)  * if need) kprobes listed on unoptimizing_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) static void do_unoptimize_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	struct optimized_kprobe *op, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	lockdep_assert_held(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	/* See comment in do_optimize_kprobes() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	/* Unoptimization must be done anytime */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	if (list_empty(&unoptimizing_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	/* Loop free_list for disarming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		/* Switching from detour code to origin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		/* Disarm probes if marked disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		if (kprobe_disabled(&op->kp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 			arch_disarm_kprobe(&op->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		if (kprobe_unused(&op->kp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			 * Remove unused probes from hash list. After waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			 * for synchronization, these probes are reclaimed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			 * (reclaiming is done by do_free_cleaned_kprobes.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 			hlist_del_rcu(&op->kp.hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 			list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) /* Reclaim all kprobes on the free_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) static void do_free_cleaned_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	struct optimized_kprobe *op, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 			 * This must not happen, but if there is a kprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 			 * still in use, keep it on kprobes hash list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		free_aggr_kprobe(&op->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) /* Start optimizer after OPTIMIZE_DELAY passed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) static void kick_kprobe_optimizer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) /* Kprobe jump optimizer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void kprobe_optimizer(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	 * kprobes before waiting for quiesence period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	do_unoptimize_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	 * Step 2: Wait for quiesence period to ensure all potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	 * preempted tasks to have normally scheduled. Because optprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	 * may modify multiple instructions, there is a chance that Nth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	 * instruction is preempted. In that case, such tasks can return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	 * Note that on non-preemptive kernel, this is transparently converted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	 * to synchronoze_sched() to wait for all interrupts to have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	/* Step 3: Optimize kprobes after quiesence period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	do_optimize_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	/* Step 4: Free cleaned kprobes after quiesence period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	do_free_cleaned_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	/* Step 5: Kick optimizer again if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		kick_kprobe_optimizer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) /* Wait for completing optimization and unoptimization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) void wait_for_kprobe_optimizer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		/* this will also make optimizing_work execute immmediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		flush_delayed_work(&optimizing_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		/* @optimizing_work might not have been queued yet, relax */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) static bool optprobe_queued_unopt(struct optimized_kprobe *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	struct optimized_kprobe *_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	list_for_each_entry(_op, &unoptimizing_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		if (op == _op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) /* Optimize kprobe if p is ready to be optimized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static void optimize_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* Check if the kprobe is disabled or not ready for optimization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	    (kprobe_disabled(p) || kprobes_all_disarmed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	/* kprobes with post_handler can not be optimized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (p->post_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	/* Check there is no other kprobes at the optimized instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	if (arch_check_optimized_kprobe(op) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	/* Check if it is already optimized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		if (optprobe_queued_unopt(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			/* This is under unoptimizing. Just dequeue the probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	if (WARN_ON_ONCE(!list_empty(&op->list)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	list_add(&op->list, &optimizing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	kick_kprobe_optimizer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) /* Short cut to direct unoptimizing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) static void force_unoptimize_kprobe(struct optimized_kprobe *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	arch_unoptimize_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) /* Unoptimize a kprobe if p is optimized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) static void unoptimize_kprobe(struct kprobe *p, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		return; /* This is not an optprobe nor optimized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (!kprobe_optimized(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	if (!list_empty(&op->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		if (optprobe_queued_unopt(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			/* Queued in unoptimizing queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			if (force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 				 * Forcibly unoptimize the kprobe here, and queue it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 				 * in the freeing list for release afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 				force_unoptimize_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 				list_move(&op->list, &freeing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			/* Dequeue from the optimizing queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/* Optimized kprobe case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		/* Forcibly update the code: this is a special case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		force_unoptimize_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		list_add(&op->list, &unoptimizing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		kick_kprobe_optimizer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) /* Cancel unoptimizing for reusing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) static int reuse_unused_kprobe(struct kprobe *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 * there is still a relative jump) and disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	op = container_of(ap, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	WARN_ON_ONCE(list_empty(&op->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/* Enable the probe again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	ap->flags &= ~KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	/* Optimize it again (remove from op->list) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (!kprobe_optready(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	optimize_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) /* Remove optimized instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) static void kill_optimized_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (!list_empty(&op->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		/* Dequeue from the (un)optimization queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (kprobe_unused(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		/* Enqueue if it is unused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		list_add(&op->list, &freeing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		 * Remove unused probes from the hash list. After waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		 * for synchronization, this probe is reclaimed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		 * (reclaiming is done by do_free_cleaned_kprobes().)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		hlist_del_rcu(&op->kp.hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	/* Don't touch the code, because it is already freed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	arch_remove_optimized_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (!kprobe_ftrace(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		arch_prepare_optimized_kprobe(op, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) /* Try to prepare optimized instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) static void prepare_optimized_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	op = container_of(p, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	__prepare_optimized_kprobe(op, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) /* Allocate new optimized_kprobe and try to prepare optimized instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (!op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	INIT_LIST_HEAD(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	op->kp.addr = p->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	__prepare_optimized_kprobe(op, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	return &op->kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * Prepare an optimized_kprobe and optimize it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * NOTE: p must be a normal registered kprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static void try_to_optimize_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct kprobe *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct optimized_kprobe *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* Impossible to optimize ftrace-based kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (kprobe_ftrace(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	/* For preparing optimization, jump_label_text_reserved() is called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	jump_label_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	ap = alloc_aggr_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (!ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	op = container_of(ap, struct optimized_kprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (!arch_prepared_optinsn(&op->optinsn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		/* If failed to setup optimizing, fallback to kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		arch_remove_optimized_kprobe(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		kfree(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	init_aggr_kprobe(ap, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	optimize_kprobe(ap);	/* This just kicks optimizer thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	jump_label_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static void optimize_all_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* If optimization is already allowed, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (kprobes_allow_optimization)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	kprobes_allow_optimization = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		hlist_for_each_entry(p, head, hlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			if (!kprobe_disabled(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				optimize_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	printk(KERN_INFO "Kprobes globally optimized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) #ifdef CONFIG_SYSCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) static void unoptimize_all_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	/* If optimization is already prohibited, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (!kprobes_allow_optimization) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	kprobes_allow_optimization = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		hlist_for_each_entry(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			if (!kprobe_disabled(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 				unoptimize_kprobe(p, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	/* Wait for unoptimizing completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	wait_for_kprobe_optimizer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	printk(KERN_INFO "Kprobes globally unoptimized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static DEFINE_MUTEX(kprobe_sysctl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) int sysctl_kprobes_optimization;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				      void *buffer, size_t *length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 				      loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	mutex_lock(&kprobe_sysctl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (sysctl_kprobes_optimization)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		optimize_all_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		unoptimize_all_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	mutex_unlock(&kprobe_sysctl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) #endif /* CONFIG_SYSCTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) /* Put a breakpoint for a probe. Must be called with text_mutex locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) static void __arm_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct kprobe *_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/* Check collision with other optimized kprobes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	_p = get_optimized_kprobe((unsigned long)p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (unlikely(_p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		/* Fallback to unoptimized kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		unoptimize_kprobe(_p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	arch_arm_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static void __disarm_kprobe(struct kprobe *p, bool reopt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct kprobe *_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/* Try to unoptimize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	unoptimize_kprobe(p, kprobes_all_disarmed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (!kprobe_queued(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		arch_disarm_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		/* If another kprobe was blocked, optimize it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		_p = get_optimized_kprobe((unsigned long)p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (unlikely(_p) && reopt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			optimize_kprobe(_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* TODO: reoptimize others after unoptimized this probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) #else /* !CONFIG_OPTPROBES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) #define optimize_kprobe(p)			do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) #define unoptimize_kprobe(p, f)			do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) #define kill_optimized_kprobe(p)		do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) #define prepare_optimized_kprobe(p)		do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) #define try_to_optimize_kprobe(p)		do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) #define __arm_kprobe(p)				arch_arm_kprobe(p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) #define kprobe_disarmed(p)			kprobe_disabled(p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) #define wait_for_kprobe_optimizer()		do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static int reuse_unused_kprobe(struct kprobe *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 * If the optimized kprobe is NOT supported, the aggr kprobe is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * released at the same time that the last aggregated kprobe is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * unregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * Thus there should be no chance to reuse unused kprobe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void free_aggr_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	arch_remove_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) #endif /* CONFIG_OPTPROBES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) #ifdef CONFIG_KPROBES_ON_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	.func = kprobe_ftrace_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	.flags = FTRACE_OPS_FL_SAVE_REGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	.func = kprobe_ftrace_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static int kprobe_ipmodify_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static int kprobe_ftrace_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Must ensure p->addr is really on ftrace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static int prepare_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (!kprobe_ftrace(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		return arch_prepare_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return arch_prepare_kprobe_ftrace(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Caller must lock kprobe_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			       int *cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			 p->addr, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (*cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		ret = register_ftrace_function(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			goto err_ftrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	(*cnt)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) err_ftrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	 * At this point, sinec ops is not registered, we should be sefe from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	 * registering empty filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static int arm_kprobe_ftrace(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	bool ipmodify = (p->post_handler != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	return __arm_kprobe_ftrace(p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* Caller must lock kprobe_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 				  int *cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	if (*cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		ret = unregister_ftrace_function(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	(*cnt)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		  p->addr, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int disarm_kprobe_ftrace(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	bool ipmodify = (p->post_handler != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return __disarm_kprobe_ftrace(p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) #else	/* !CONFIG_KPROBES_ON_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static inline int prepare_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	return arch_prepare_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static inline int arm_kprobe_ftrace(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static inline int disarm_kprobe_ftrace(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Arm a kprobe with text_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static int arm_kprobe(struct kprobe *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (unlikely(kprobe_ftrace(kp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		return arm_kprobe_ftrace(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	__arm_kprobe(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* Disarm a kprobe with text_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static int disarm_kprobe(struct kprobe *kp, bool reopt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (unlikely(kprobe_ftrace(kp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		return disarm_kprobe_ftrace(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	__disarm_kprobe(kp, reopt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * Aggregate handlers for multiple kprobes support - these handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * take care of invoking the individual kprobe handlers on p->list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	list_for_each_entry_rcu(kp, &p->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			set_kprobe_instance(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			if (kp->pre_handler(kp, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		reset_kprobe_instance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) NOKPROBE_SYMBOL(aggr_pre_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			      unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	list_for_each_entry_rcu(kp, &p->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			set_kprobe_instance(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			kp->post_handler(kp, regs, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			reset_kprobe_instance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) NOKPROBE_SYMBOL(aggr_post_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			      int trapnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	 * if we faulted "during" the execution of a user specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	 * probe handler, invoke just that probe's fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (cur && cur->fault_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		if (cur->fault_handler(cur, regs, trapnr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) NOKPROBE_SYMBOL(aggr_fault_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Walks the list and increments nmissed count for multiprobe case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) void kprobes_inc_nmissed_count(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (!kprobe_aggrprobe(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		p->nmissed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		list_for_each_entry_rcu(kp, &p->list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			kp->nmissed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static void recycle_rp_inst(struct kretprobe_instance *ri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct kretprobe *rp = ri->rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	/* remove rp inst off the rprobe_inst_table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	hlist_del(&ri->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	INIT_HLIST_NODE(&ri->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (likely(rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		raw_spin_lock(&rp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		hlist_add_head(&ri->hlist, &rp->free_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		raw_spin_unlock(&rp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		kfree_rcu(ri, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) NOKPROBE_SYMBOL(recycle_rp_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void kretprobe_hash_lock(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			 struct hlist_head **head, unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) __acquires(hlist_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	raw_spinlock_t *hlist_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	*head = &kretprobe_inst_table[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	hlist_lock = kretprobe_table_lock_ptr(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	 * Nested is a workaround that will soon not be needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	 * There's other protections that make sure the same lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	 * is not taken on the same CPU that lockdep is unaware of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	 * Differentiate when it is taken in NMI context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) NOKPROBE_SYMBOL(kretprobe_hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static void kretprobe_table_lock(unsigned long hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				 unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) __acquires(hlist_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	 * Nested is a workaround that will soon not be needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	 * There's other protections that make sure the same lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 * is not taken on the same CPU that lockdep is unaware of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * Differentiate when it is taken in NMI context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) NOKPROBE_SYMBOL(kretprobe_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void kretprobe_hash_unlock(struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			   unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) __releases(hlist_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	raw_spinlock_t *hlist_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	hlist_lock = kretprobe_table_lock_ptr(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) NOKPROBE_SYMBOL(kretprobe_hash_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void kretprobe_table_unlock(unsigned long hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				   unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) __releases(hlist_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) NOKPROBE_SYMBOL(kretprobe_table_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static struct kprobe kprobe_busy = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	.addr = (void *) get_kprobe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) void kprobe_busy_begin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	struct kprobe_ctlblk *kcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	__this_cpu_write(current_kprobe, &kprobe_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	kcb = get_kprobe_ctlblk();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) void kprobe_busy_end(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	__this_cpu_write(current_kprobe, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * This function is called from finish_task_switch when task tk becomes dead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * so that we can recycle any function-return probe instances associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * with this task. These left over instances represent probed functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * that have been called but will never return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) void kprobe_flush_task(struct task_struct *tk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	struct kretprobe_instance *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	unsigned long hash, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (unlikely(!kprobes_initialized))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		/* Early boot.  kretprobe_table_locks not yet initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	kprobe_busy_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	head = &kretprobe_inst_table[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	kretprobe_table_lock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		if (ri->task == tk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			recycle_rp_inst(ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	kretprobe_table_unlock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	kprobe_busy_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) NOKPROBE_SYMBOL(kprobe_flush_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static inline void free_rp_inst(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	struct kretprobe_instance *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	struct hlist_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		hlist_del(&ri->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		kfree(ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void cleanup_rp_inst(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	unsigned long flags, hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct kretprobe_instance *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct hlist_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	/* To avoid recursive kretprobe by NMI, set kprobe busy here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	kprobe_busy_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		kretprobe_table_lock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		head = &kretprobe_inst_table[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		hlist_for_each_entry_safe(ri, next, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			if (ri->rp == rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				ri->rp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		kretprobe_table_unlock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	kprobe_busy_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	free_rp_inst(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) NOKPROBE_SYMBOL(cleanup_rp_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* Add the new probe to ap->list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (p->post_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	list_add_rcu(&p->list, &ap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (p->post_handler && !ap->post_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		ap->post_handler = aggr_post_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * Fill in the required fields of the "manager kprobe". Replace the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * earlier kprobe in the hlist with the manager kprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/* Copy p's insn slot to ap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	copy_kprobe(p, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	flush_insn_slot(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	ap->addr = p->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	ap->pre_handler = aggr_pre_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	ap->fault_handler = aggr_fault_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	/* We don't care the kprobe which has gone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	if (p->post_handler && !kprobe_gone(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		ap->post_handler = aggr_post_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	INIT_LIST_HEAD(&ap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	INIT_HLIST_NODE(&ap->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	list_add_rcu(&p->list, &ap->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	hlist_replace_rcu(&p->hlist, &ap->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  * This is the second or subsequent kprobe at the address - handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * the intricacies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct kprobe *ap = orig_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* For preparing optimization, jump_label_text_reserved() is called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	jump_label_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (!kprobe_aggrprobe(orig_p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		ap = alloc_aggr_kprobe(orig_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		if (!ap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		init_aggr_kprobe(ap, orig_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	} else if (kprobe_unused(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		/* This probe is going to die. Rescue it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		ret = reuse_unused_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (kprobe_gone(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		 * Attempting to insert new probe at the same location that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		 * had a probe in the module vaddr area which already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		 * freed. So, the instruction slot has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		 * released. We need a new slot for the new probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		ret = arch_prepare_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			 * Even if fail to allocate new slot, don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			 * free aggr_probe. It will be used next time, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			 * freed by unregister_kprobe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		/* Prepare optimized instructions if possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		prepare_optimized_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		 * Clear gone flag to prevent allocating new slot again, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		 * set disabled flag because it is not armed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			    | KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	/* Copy ap's insn slot to p */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	copy_kprobe(ap, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	ret = add_new_kprobe(ap, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	jump_label_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		ap->flags &= ~KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (!kprobes_all_disarmed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			/* Arm the breakpoint again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			ret = arm_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 				ap->flags |= KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 				list_del_rcu(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 				synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) bool __weak arch_within_kprobe_blacklist(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	/* The __kprobes marked functions and entry code must not be probed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	return addr >= (unsigned long)__kprobes_text_start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	       addr < (unsigned long)__kprobes_text_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static bool __within_kprobe_blacklist(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	struct kprobe_blacklist_entry *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (arch_within_kprobe_blacklist(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	 * If there exists a kprobe_blacklist, verify and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	 * fail any probe registration in the prohibited area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	list_for_each_entry(ent, &kprobe_blacklist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		if (addr >= ent->start_addr && addr < ent->end_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) bool within_kprobe_blacklist(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	char symname[KSYM_NAME_LEN], *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (__within_kprobe_blacklist(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	/* Check if the address is on a suffixed-symbol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	if (!lookup_symbol_name(addr, symname)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		p = strchr(symname, '.');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		*p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			return __within_kprobe_blacklist(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * If we have a symbol_name argument, look it up and add the offset field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * to it. This way, we can specify a relative address to a symbol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * This returns encoded errors if it fails to look up symbol or invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  * combination of parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			const char *symbol_name, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if ((symbol_name && addr) || (!symbol_name && !addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		goto invalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (symbol_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		addr = kprobe_lookup_name(symbol_name, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) invalid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* Check passed kprobe is valid and return kprobe in kprobe_table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static struct kprobe *__get_valid_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct kprobe *ap, *list_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	lockdep_assert_held(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	ap = get_kprobe(p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (unlikely(!ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	if (p != ap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		list_for_each_entry(list_p, &ap->list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			if (list_p == p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			/* kprobe p is a valid probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 				goto valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) valid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	return ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /* Return error if the kprobe is being re-registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static inline int check_kprobe_rereg(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (__get_valid_kprobe(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) int __weak arch_check_ftrace_location(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	unsigned long ftrace_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	ftrace_addr = ftrace_location((unsigned long)p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	if (ftrace_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) #ifdef CONFIG_KPROBES_ON_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		/* Given address is not on the instruction boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		if ((unsigned long)p->addr != ftrace_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		p->flags |= KPROBE_FLAG_FTRACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) #else	/* !CONFIG_KPROBES_ON_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static int check_kprobe_address_safe(struct kprobe *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 				     struct module **probed_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	ret = arch_check_ftrace_location(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	jump_label_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	/* Ensure it is not in reserved area nor out of text */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (!kernel_text_address((unsigned long) p->addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	    within_kprobe_blacklist((unsigned long) p->addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	    jump_label_text_reserved(p->addr, p->addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	    static_call_text_reserved(p->addr, p->addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	    find_bug((unsigned long)p->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	/* Check if are we probing a module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	*probed_mod = __module_text_address((unsigned long) p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	if (*probed_mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		 * We must hold a refcount of the probed module while updating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		 * its code to prohibit unexpected unloading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		if (unlikely(!try_module_get(*probed_mod))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		 * If the module freed .init.text, we couldn't insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		 * kprobes in there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		    (*probed_mod)->state != MODULE_STATE_COMING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			module_put(*probed_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			*probed_mod = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	jump_label_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) int register_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	struct kprobe *old_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	struct module *probed_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	kprobe_opcode_t *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	/* Adjust probe address from symbol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	addr = kprobe_addr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	if (IS_ERR(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		return PTR_ERR(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	p->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	ret = check_kprobe_rereg(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	p->flags &= KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	p->nmissed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	INIT_LIST_HEAD(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	ret = check_kprobe_address_safe(p, &probed_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	old_p = get_kprobe(p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	if (old_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		/* Since this may unoptimize old_p, locking text_mutex. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		ret = register_aggr_kprobe(old_p, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	/* Prevent text modification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	mutex_lock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	ret = prepare_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	mutex_unlock(&text_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	INIT_HLIST_NODE(&p->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	hlist_add_head_rcu(&p->hlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		ret = arm_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			hlist_del_rcu(&p->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	/* Try to optimize kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	try_to_optimize_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (probed_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		module_put(probed_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) EXPORT_SYMBOL_GPL(register_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* Check if all probes on the aggrprobe are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static int aggr_kprobe_disabled(struct kprobe *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	lockdep_assert_held(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	list_for_each_entry(kp, &ap->list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		if (!kprobe_disabled(kp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			 * There is an active probe on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			 * We can't disable this ap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Disable one kprobe: Make sure called under kprobe_mutex is locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) static struct kprobe *__disable_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	struct kprobe *orig_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	/* Get an original kprobe for return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	orig_p = __get_valid_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (unlikely(orig_p == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	if (!kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		/* Disable probe if it is a child probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		if (p != orig_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			p->flags |= KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		/* Try to disarm and disable this/parent probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			 * If kprobes_all_disarmed is set, orig_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			 * should have already been disarmed, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			 * skip unneed disarming process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			if (!kprobes_all_disarmed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 				ret = disarm_kprobe(orig_p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 				if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 					p->flags &= ~KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 					return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			orig_p->flags |= KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return orig_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * Unregister a kprobe without a scheduler synchronization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) static int __unregister_kprobe_top(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	struct kprobe *ap, *list_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	/* Disable kprobe. This will disarm it if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	ap = __disable_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (IS_ERR(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		return PTR_ERR(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (ap == p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		 * This probe is an independent(and non-optimized) kprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		 * (not an aggrprobe). Remove from the hash list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		goto disarmed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	/* Following process expects this probe is an aggrprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	WARN_ON(!kprobe_aggrprobe(ap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		 * !disarmed could be happen if the probe is under delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		 * unoptimizing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		goto disarmed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		/* If disabling probe has special handlers, update aggrprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		if (p->post_handler && !kprobe_gone(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			list_for_each_entry(list_p, &ap->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 				if ((list_p != p) && (list_p->post_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 					goto noclean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			ap->post_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) noclean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		 * Remove from the aggrprobe: this path will do nothing in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		 * __unregister_kprobe_bottom().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		list_del_rcu(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			 * Try to optimize this probe again, because post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			 * handler may have been changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			optimize_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) disarmed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	hlist_del_rcu(&ap->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static void __unregister_kprobe_bottom(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	struct kprobe *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	if (list_empty(&p->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		/* This is an independent kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		arch_remove_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	else if (list_is_singular(&p->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		/* This is the last child of an aggrprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		ap = list_entry(p->list.next, struct kprobe, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		list_del(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		free_aggr_kprobe(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	/* Otherwise, do nothing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) int register_kprobes(struct kprobe **kps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (num <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		ret = register_kprobe(kps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 				unregister_kprobes(kps, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) EXPORT_SYMBOL_GPL(register_kprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) void unregister_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	unregister_kprobes(&p, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) EXPORT_SYMBOL_GPL(unregister_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) void unregister_kprobes(struct kprobe **kps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	if (num <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	for (i = 0; i < num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		if (__unregister_kprobe_top(kps[i]) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			kps[i]->addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	for (i = 0; i < num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		if (kps[i]->addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			__unregister_kprobe_bottom(kps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) EXPORT_SYMBOL_GPL(unregister_kprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) int __weak kprobe_exceptions_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 					unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) NOKPROBE_SYMBOL(kprobe_exceptions_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static struct notifier_block kprobe_exceptions_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	.notifier_call = kprobe_exceptions_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	.priority = 0x7fffffff /* we need to be notified first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) unsigned long __weak arch_deref_entry_point(void *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	return (unsigned long)entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) #ifdef CONFIG_KRETPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 					     void *trampoline_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 					     void *frame_pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	struct kretprobe_instance *ri = NULL, *last = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	kprobe_opcode_t *correct_ret_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	bool skipped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	kretprobe_hash_lock(current, &head, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	 * It is possible to have multiple instances associated with a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	 * task either because multiple functions in the call path have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	 * return probes installed on them, and/or more than one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	 * return probe was registered for a target function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	 * We can handle this because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	 *     - instances are always pushed into the head of the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	 *     - when multiple return probes are registered for the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	 *	 function, the (chronologically) first instance's ret_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 *	 will be the real return address, and all the rest will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	 *	 point to kretprobe_trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	hlist_for_each_entry(ri, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		if (ri->task != current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			/* another task is sharing our hash bucket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		 * Return probes must be pushed on this hash list correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		 * order (same as return order) so that it can be popped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		 * correctly. However, if we find it is pushed it incorrect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		 * order, this means we find a function which should not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		 * probed, because the wrong order entry is pushed on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		 * path of processing other kretprobe itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		if (ri->fp != frame_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 			if (!skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 			skipped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		correct_ret_addr = ri->ret_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		if (skipped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 				ri->rp->kp.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		if (correct_ret_addr != trampoline_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			 * This is the real return address. Any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			 * instances associated with this task are for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			 * other calls deeper on the call stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	last = ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		if (ri->task != current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 			/* another task is sharing our hash bucket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		if (ri->fp != frame_pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		if (ri->rp && ri->rp->handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			struct kprobe *prev = kprobe_running();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			__this_cpu_write(current_kprobe, &ri->rp->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			ri->ret_addr = correct_ret_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			ri->rp->handler(ri, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			__this_cpu_write(current_kprobe, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		recycle_rp_inst(ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		if (ri == last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	kretprobe_hash_unlock(current, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	return (unsigned long)correct_ret_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  * This kprobe pre_handler is registered with every kretprobe. When probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  * hits it will set up the return probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	unsigned long hash, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	struct kretprobe_instance *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	/* TODO: consider to only swap the RA after the last pre_handler fired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	hash = hash_ptr(current, KPROBE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	 * Nested is a workaround that will soon not be needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	 * There's other protections that make sure the same lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	 * is not taken on the same CPU that lockdep is unaware of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	if (!hlist_empty(&rp->free_instances)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		ri = hlist_entry(rp->free_instances.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 				struct kretprobe_instance, hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		hlist_del(&ri->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		raw_spin_unlock_irqrestore(&rp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		ri->rp = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		ri->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			hlist_add_head(&ri->hlist, &rp->free_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			raw_spin_unlock_irqrestore(&rp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		arch_prepare_kretprobe(ri, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		/* XXX(hch): why is there no hlist_move_head? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		INIT_HLIST_NODE(&ri->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		kretprobe_table_lock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		kretprobe_table_unlock(hash, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		rp->nmissed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		raw_spin_unlock_irqrestore(&rp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) NOKPROBE_SYMBOL(pre_handler_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) bool __weak arch_kprobe_on_func_entry(unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	return !offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  * kprobe_on_func_entry() -- check whether given address is function entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * @addr: Target address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  * @sym:  Target symbol name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)  * @offset: The offset from the symbol or the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)  * This checks whether the given @addr+@offset or @sym+@offset is on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)  * function entry address or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  * This returns 0 if it is the function entry, or -EINVAL if it is not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)  * And also it returns -ENOENT if it fails the symbol or address lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)  * Caller must pass @addr or @sym (either one must be NULL), or this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)  * returns -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	if (IS_ERR(kp_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		return PTR_ERR(kp_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (!arch_kprobe_on_func_entry(offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) int register_kretprobe(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	struct kretprobe_instance *inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	/* If only rp->kp.addr is specified, check reregistering kprobes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	if (kretprobe_blacklist_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		addr = kprobe_addr(&rp->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		if (IS_ERR(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			return PTR_ERR(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 			if (kretprobe_blacklist[i].addr == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	rp->kp.pre_handler = pre_handler_kretprobe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	rp->kp.post_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	rp->kp.fault_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	/* Pre-allocate memory for max kretprobe instances */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	if (rp->maxactive <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		rp->maxactive = num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	raw_spin_lock_init(&rp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	INIT_HLIST_HEAD(&rp->free_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	for (i = 0; i < rp->maxactive; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		inst = kmalloc(sizeof(struct kretprobe_instance) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			       rp->data_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		if (inst == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			free_rp_inst(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		INIT_HLIST_NODE(&inst->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		hlist_add_head(&inst->hlist, &rp->free_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	rp->nmissed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	/* Establish function entry probe point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	ret = register_kprobe(&rp->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		free_rp_inst(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) EXPORT_SYMBOL_GPL(register_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int register_kretprobes(struct kretprobe **rps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	if (num <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		ret = register_kretprobe(rps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				unregister_kretprobes(rps, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) EXPORT_SYMBOL_GPL(register_kretprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) void unregister_kretprobe(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	unregister_kretprobes(&rp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) EXPORT_SYMBOL_GPL(unregister_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) void unregister_kretprobes(struct kretprobe **rps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	if (num <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	for (i = 0; i < num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			rps[i]->kp.addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		if (rps[i]->kp.addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			__unregister_kprobe_bottom(&rps[i]->kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			cleanup_rp_inst(rps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) EXPORT_SYMBOL_GPL(unregister_kretprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) #else /* CONFIG_KRETPROBES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) int register_kretprobe(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) EXPORT_SYMBOL_GPL(register_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) int register_kretprobes(struct kretprobe **rps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) EXPORT_SYMBOL_GPL(register_kretprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) void unregister_kretprobe(struct kretprobe *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) EXPORT_SYMBOL_GPL(unregister_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) void unregister_kretprobes(struct kretprobe **rps, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) EXPORT_SYMBOL_GPL(unregister_kretprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) NOKPROBE_SYMBOL(pre_handler_kretprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) #endif /* CONFIG_KRETPROBES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) /* Set the kprobe gone and remove its instruction buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) static void kill_kprobe(struct kprobe *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	struct kprobe *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	lockdep_assert_held(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	if (WARN_ON_ONCE(kprobe_gone(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	p->flags |= KPROBE_FLAG_GONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	if (kprobe_aggrprobe(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		 * If this is an aggr_kprobe, we have to list all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		 * chained probes and mark them GONE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		list_for_each_entry(kp, &p->list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			kp->flags |= KPROBE_FLAG_GONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		p->post_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		kill_optimized_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 * Here, we can remove insn_slot safely, because no thread calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	 * the original probed function (which will be freed soon) any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	arch_remove_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	 * The module is going away. We should disarm the kprobe which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	 * is using ftrace, because ftrace framework is still available at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 * MODULE_STATE_GOING notification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		disarm_kprobe_ftrace(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /* Disable one kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) int disable_kprobe(struct kprobe *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	/* Disable this kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	p = __disable_kprobe(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		ret = PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) EXPORT_SYMBOL_GPL(disable_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) /* Enable one kprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int enable_kprobe(struct kprobe *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	/* Check whether specified probe is valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	p = __get_valid_kprobe(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	if (unlikely(p == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	if (kprobe_gone(kp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		/* This kprobe has gone, we couldn't enable it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	if (p != kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		kp->flags &= ~KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		p->flags &= ~KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		ret = arm_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 			p->flags |= KPROBE_FLAG_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) EXPORT_SYMBOL_GPL(enable_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /* Caller must NOT call this in usual path. This is only for critical case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) void dump_kprobe(struct kprobe *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	pr_err("Dumping kprobe:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	       kp->symbol_name, kp->offset, kp->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) NOKPROBE_SYMBOL(dump_kprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) int kprobe_add_ksym_blacklist(unsigned long entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	struct kprobe_blacklist_entry *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	unsigned long offset = 0, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	if (!kernel_text_address(entry) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	    !kallsyms_lookup_size_offset(entry, &size, &offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	ent->start_addr = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	ent->end_addr = entry + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	INIT_LIST_HEAD(&ent->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	list_add_tail(&ent->list, &kprobe_blacklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	return (int)size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* Add all symbols in given area into kprobe blacklist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	unsigned long entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	for (entry = start; entry < end; entry += ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		ret = kprobe_add_ksym_blacklist(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		if (ret == 0)	/* In case of alias symbol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) /* Remove all symbols in given area from kprobe blacklist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	struct kprobe_blacklist_entry *ent, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		if (ent->start_addr < start || ent->start_addr >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		list_del(&ent->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		kfree(ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static void kprobe_remove_ksym_blacklist(unsigned long entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	kprobe_remove_area_blacklist(entry, entry + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 				   char *type, char *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		       char *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #ifdef CONFIG_OPTPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) int __init __weak arch_populate_kprobe_blacklist(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)  * Lookup and populate the kprobe_blacklist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)  * Unlike the kretprobe blacklist, we'll need to determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)  * the range of addresses that belong to the said functions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)  * since a kprobe need not necessarily be at the beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)  * of a function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) static int __init populate_kprobe_blacklist(unsigned long *start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 					     unsigned long *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	unsigned long entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	unsigned long *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	for (iter = start; iter < end; iter++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		entry = arch_deref_entry_point((void *)*iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		ret = kprobe_add_ksym_blacklist(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		if (ret == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	/* Symbols in __kprobes_text are blacklisted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 					(unsigned long)__kprobes_text_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	/* Symbols in noinstr section are blacklisted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 					(unsigned long)__noinstr_text_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	return ret ? : arch_populate_kprobe_blacklist();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static void add_module_kprobe_blacklist(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	if (mod->kprobe_blacklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	start = (unsigned long)mod->kprobes_text_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		end = start + mod->kprobes_text_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		kprobe_add_area_blacklist(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	start = (unsigned long)mod->noinstr_text_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		end = start + mod->noinstr_text_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		kprobe_add_area_blacklist(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static void remove_module_kprobe_blacklist(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	if (mod->kprobe_blacklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	start = (unsigned long)mod->kprobes_text_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		end = start + mod->kprobes_text_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		kprobe_remove_area_blacklist(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	start = (unsigned long)mod->noinstr_text_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		end = start + mod->noinstr_text_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		kprobe_remove_area_blacklist(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /* Module notifier call back, checking kprobes on the module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static int kprobes_module_callback(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 				   unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	struct module *mod = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	int checkcore = (val == MODULE_STATE_GOING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (val == MODULE_STATE_COMING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		add_module_kprobe_blacklist(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	 * When MODULE_STATE_GOING was notified, both of module .text and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	 * notified, only .init.text section would be freed. We need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	 * disable kprobes which have been inserted in the sections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		hlist_for_each_entry(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 			if (kprobe_gone(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			if (within_module_init((unsigned long)p->addr, mod) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			    (checkcore &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 			     within_module_core((unsigned long)p->addr, mod))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 				 * The vaddr this probe is installed will soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 				 * be vfreed buy not synced to disk. Hence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 				 * disarming the breakpoint isn't needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 				 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 				 * Note, this will also move any optimized probes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 				 * that are pending to be removed from their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				 * corresponding lists to the freeing_list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 				 * will not be touched by the delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 				 * kprobe_optimizer work handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 				kill_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	if (val == MODULE_STATE_GOING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		remove_module_kprobe_blacklist(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static struct notifier_block kprobe_module_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	.notifier_call = kprobes_module_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	.priority = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) /* Markers of _kprobe_blacklist section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) extern unsigned long __start_kprobe_blacklist[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) extern unsigned long __stop_kprobe_blacklist[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) void kprobe_free_init_mem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	void *start = (void *)(&__init_begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	void *end = (void *)(&__init_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	/* Kill all kprobes on initmem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 		hlist_for_each_entry(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 			if (start <= (void *)p->addr && (void *)p->addr < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 				kill_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) static int __init init_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	/* FIXME allocate the probe table, currently defined statically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	/* initialize all list heads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		INIT_HLIST_HEAD(&kprobe_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 					__stop_kprobe_blacklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		pr_err("kprobes: failed to populate blacklist: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		pr_err("Please take care of using kprobes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	if (kretprobe_blacklist_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		/* lookup the function address from its name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			kretprobe_blacklist[i].addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 			if (!kretprobe_blacklist[i].addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 				printk("kretprobe: lookup failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 				       kretprobe_blacklist[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	/* By default, kprobes are armed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	kprobes_all_disarmed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	/* Init kprobe_optinsn_slots for allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	err = arch_init_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		err = register_die_notifier(&kprobe_exceptions_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		err = register_module_notifier(&kprobe_module_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	kprobes_initialized = (err == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		init_test_probes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) early_initcall(init_kprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) #if defined(CONFIG_OPTPROBES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) static int __init init_optprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	 * Enable kprobe optimization - this kicks the optimizer which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	 * depends on synchronize_rcu_tasks() and ksoftirqd, that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	 * not spawned in early initcall. So delay the optimization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	optimize_all_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) subsys_initcall(init_optprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static void report_probe(struct seq_file *pi, struct kprobe *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		const char *sym, int offset, char *modname, struct kprobe *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	char *kprobe_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	void *addr = p->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (p->pre_handler == pre_handler_kretprobe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		kprobe_type = "r";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		kprobe_type = "k";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	if (!kallsyms_show_value(pi->file->f_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	if (sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			addr, kprobe_type, sym, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 			(modname ? modname : " "));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	else	/* try to use %pS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		seq_printf(pi, "%px  %s  %pS ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 			addr, kprobe_type, p->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	if (!pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		pp = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	seq_printf(pi, "%s%s%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		(kprobe_gone(p) ? "[GONE]" : ""),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	if (*pos >= KPROBE_TABLE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) static void kprobe_seq_stop(struct seq_file *f, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	/* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) static int show_kprobe_addr(struct seq_file *pi, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	struct kprobe *p, *kp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	const char *sym = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	unsigned int i = *(loff_t *) v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	unsigned long offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	char *modname, namebuf[KSYM_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	hlist_for_each_entry_rcu(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 					&offset, &modname, namebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		if (kprobe_aggrprobe(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 			list_for_each_entry_rcu(kp, &p->list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 				report_probe(pi, kp, sym, offset, modname, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 			report_probe(pi, p, sym, offset, modname, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) static const struct seq_operations kprobes_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	.start = kprobe_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	.next  = kprobe_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	.stop  = kprobe_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	.show  = show_kprobe_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) DEFINE_SEQ_ATTRIBUTE(kprobes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* kprobes/blacklist -- shows which functions can not be probed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	return seq_list_start(&kprobe_blacklist, *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	return seq_list_next(v, &kprobe_blacklist, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	struct kprobe_blacklist_entry *ent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		list_entry(v, struct kprobe_blacklist_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	 * If /proc/kallsyms is not showing kernel address, we won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	 * show them here either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	if (!kallsyms_show_value(m->file->f_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 			   (void *)ent->start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 			   (void *)ent->end_addr, (void *)ent->start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) static const struct seq_operations kprobe_blacklist_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	.start = kprobe_blacklist_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	.next  = kprobe_blacklist_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	.stop  = kprobe_blacklist_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	.show  = kprobe_blacklist_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) static int arm_all_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	unsigned int i, total = 0, errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	/* If kprobes are armed, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	if (!kprobes_all_disarmed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		goto already_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	 * optimize_kprobe() called by arm_kprobe() checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	 * arm_kprobe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	kprobes_all_disarmed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	/* Arming kprobes doesn't optimize kprobe itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		/* Arm all kprobes on a best-effort basis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		hlist_for_each_entry(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 			if (!kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 				err = arm_kprobe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 				if (err)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 					errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 					ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 				total++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	if (errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 			errors, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		pr_info("Kprobes globally enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) already_enabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) static int disarm_all_kprobes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	struct kprobe *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	unsigned int i, total = 0, errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	mutex_lock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	/* If kprobes are already disarmed, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	if (kprobes_all_disarmed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	kprobes_all_disarmed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		head = &kprobe_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		/* Disarm all kprobes on a best-effort basis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		hlist_for_each_entry(p, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 				err = disarm_kprobe(p, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 				if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 					errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 					ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 				total++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	if (errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 			errors, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		pr_info("Kprobes globally disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	mutex_unlock(&kprobe_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	/* Wait for disarming all kprobes by optimizer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	wait_for_kprobe_optimizer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)  * XXX: The debugfs bool file interface doesn't allow for callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)  * when the bool state is switched. We can reuse that facility when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)  * available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) static ssize_t read_enabled_file_bool(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	       char __user *user_buf, size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	char buf[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	if (!kprobes_all_disarmed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		buf[0] = '1';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		buf[0] = '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	buf[1] = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	buf[2] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) static ssize_t write_enabled_file_bool(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	       const char __user *user_buf, size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	char buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	size_t buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	buf_size = min(count, (sizeof(buf)-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	if (copy_from_user(buf, user_buf, buf_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	buf[buf_size] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	switch (buf[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	case 'y':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	case 'Y':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	case '1':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		ret = arm_all_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	case 'n':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	case 'N':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	case '0':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		ret = disarm_all_kprobes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) static const struct file_operations fops_kp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	.read =         read_enabled_file_bool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	.write =        write_enabled_file_bool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	.llseek =	default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) static int __init debugfs_kprobe_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	struct dentry *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	dir = debugfs_create_dir("kprobes", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	debugfs_create_file("blacklist", 0400, dir, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			    &kprobe_blacklist_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) late_initcall(debugfs_kprobe_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) #endif /* CONFIG_DEBUG_FS */