Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * ARMv8 single-step debug support and mdscr context switching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2012 ARM Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Author: Will Deacon <will.deacon@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/hardirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/daifflags.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/debug-monitors.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/system_misc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
	/*
	 * Extract the DebugVer field from the system-wide sanitised copy
	 * of ID_AA64DFR0_EL1, i.e. the debug architecture version that is
	 * safe to assume on every CPU.
	 */
	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
						ID_AA64DFR0_DEBUGVER_SHIFT);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
/*
 * MDSCR access routines.
 */
static void mdscr_write(u32 mdscr)
{
	unsigned long flags;
	/*
	 * Mask all DAIF exceptions around the update so nothing (including
	 * debug exceptions) can run against a half-configured MDSCR_EL1.
	 */
	flags = local_daif_save();
	write_sysreg(mdscr, mdscr_el1);
	local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
static u32 mdscr_read(void)
{
	/* A single register read needs no DAIF masking, unlike mdscr_write(). */
	return read_sysreg(mdscr_el1);
}
NOKPROBE_SYMBOL(mdscr_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

/* Expose "debug_enabled" as a root-writable debugfs boolean (mode 0644). */
static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
/* "nodebugmon" on the kernel command line disables self-hosted debug. */
static int __init early_debug_disable(char *buf)
{
	/* The option takes no argument; buf is deliberately ignored. */
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/*
 * Keep track of debug users on each core.
 * These are plain per-CPU counters (not local_t): they are only touched
 * via this_cpu ops with preemption disabled, see the
 * WARN_ON(preemptible()) checks in the enable/disable paths.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
/*
 * Enable the debug monitors for @el on this CPU: set MDSCR_EL1.MDE (and
 * MDSCR_EL1.KDE for EL1 users) when the first user appears, tracked via
 * per-CPU reference counts. Must be called with preemption disabled.
 *
 * Note: the ref counts are bumped even when debug_enabled is false; only
 * the MDSCR_EL1 write is suppressed in that case.
 */
void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

	WARN_ON(preemptible());

	/* First MDE user on this CPU: request the monitor-debug-enable bit. */
	if (this_cpu_inc_return(mde_ref_count) == 1)
		enable = DBG_MDSCR_MDE;

	/* First EL1 user additionally needs the kernel-debug-enable bit. */
	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

	if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(enable_debug_monitors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
/*
 * Drop a debug-monitor reference for @el on this CPU, clearing the
 * corresponding MDSCR_EL1 bits when the last user goes away.
 * Must be called with preemption disabled.
 */
void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

	WARN_ON(preemptible());

	/* Last MDE user: build an AND-mask that clears MDSCR_EL1.MDE. */
	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	/*
	 * Also clear KDE in the mask when the last EL1 user goes away.
	 * Note: if disable is still 0 here (MDE users remain), KDE stays
	 * set until the MDE count drops to zero as well.
	 */
	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(disable_debug_monitors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
/*
 * OS lock clearing.
 */
static int clear_os_lock(unsigned int cpu)
{
	/*
	 * Clear the OS Double Lock (OSDLR_EL1), then the OS Lock
	 * (OSLAR_EL1), and synchronise with an ISB so subsequent debug
	 * register accesses are not blocked by the locks.
	 */
	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
/* Run clear_os_lock() on every CPU as it comes online (cpuhp callback). */
static int __init debug_monitors_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
				 "arm64/debug_monitors:starting",
				 clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
/*
 * Single step API and exception handling.
 */
/* Set SPSR.SS so the exception return single-steps one instruction. */
static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_user_regs_spsr_ss);

/* Clear SPSR.SS, leaving single-step in the active-pending state. */
static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);

/* Convenience wrappers taking a struct pt_regs rather than user_pt_regs. */
#define set_regs_spsr_ss(r)	set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r)	clear_user_regs_spsr_ss(&(r)->user_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static DEFINE_SPINLOCK(debug_hook_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static LIST_HEAD(user_step_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static LIST_HEAD(kernel_step_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static void register_debug_hook(struct list_head *node, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	spin_lock(&debug_hook_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	list_add_rcu(node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	spin_unlock(&debug_hook_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static void unregister_debug_hook(struct list_head *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	spin_lock(&debug_hook_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	list_del_rcu(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	spin_unlock(&debug_hook_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
/* Register a hook invoked for single-step exceptions taken from userspace. */
void register_user_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &user_step_hook);
}

void unregister_user_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

/* Register a hook invoked for single-step exceptions taken from the kernel. */
void register_kernel_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_step_hook);
}

void unregister_kernel_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * Call registered single step handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * There is no Syndrome info to check for determining the handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  * So we call all the registered handlers, until the right handler is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  * found which returns zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static int call_step_hook(struct pt_regs *regs, unsigned int esr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	struct step_hook *hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	struct list_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	int retval = DBG_HOOK_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	 * Since single-step exception disables interrupt, this function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	 * entirely not preemptible, and we can use rcu list safely here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	list_for_each_entry_rcu(hook, list, node)	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		retval = hook->fn(regs, esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 		if (retval == DBG_HOOK_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) NOKPROBE_SYMBOL(call_step_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
/* Deliver SIGTRAP with @si_code to current, reporting the trapping PC. */
static void send_user_sigtrap(int si_code)
{
	struct pt_regs *regs = current_pt_regs();

	/* Only meaningful for traps taken from userspace. */
	if (WARN_ON(!user_mode(regs)))
		return;

	/*
	 * Restore the interrupt state of the interrupted context before
	 * heading into the signal-delivery path.
	 */
	if (interrupts_enabled(regs))
		local_irq_enable();

	arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
			      "User debug trap");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static int single_step_handler(unsigned long unused, unsigned int esr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 			       struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	bool handler_found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	 * If we are stepping a pending breakpoint, call the hw_breakpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	 * handler first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if (!reinstall_suspended_bps(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		handler_found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	if (!handler_found && user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		send_user_sigtrap(TRAP_TRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		 * ptrace will disable single step unless explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		 * asked to re-enable it. For other clients, it makes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		 * sense to leave it enabled (i.e. rewind the controls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		 * to the active-not-pending state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		user_rewind_single_step(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	} else if (!handler_found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		pr_warn("Unexpected kernel single-step exception at EL1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		 * Re-enable stepping since we know that we will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		 * returning to regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		set_regs_spsr_ss(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) NOKPROBE_SYMBOL(single_step_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);

/* Register a hook invoked for BRK exceptions taken from userspace. */
void register_user_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &user_break_hook);
}
EXPORT_SYMBOL_GPL(register_user_break_hook);

void unregister_user_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_user_break_hook);

/* Register a hook invoked for BRK exceptions taken from the kernel. */
void register_kernel_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_break_hook);
}
EXPORT_SYMBOL_GPL(register_kernel_break_hook);

void unregister_kernel_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_kernel_break_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
/*
 * Find and run the break hook whose (imm, mask) matches the comment
 * immediate encoded in the BRK instruction's ESR. Returns DBG_HOOK_ERROR
 * when no hook matches or when the matching hook rejects the exception.
 */
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
	struct break_hook *hook;
	struct list_head *list;
	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;

	list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;

	/*
	 * Since brk exception disables interrupt, this function is
	 * entirely not preemptible, and we can use rcu list safely here.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

		/*
		 * No early exit: the whole list is scanned, so the last
		 * matching hook in list order is the one that runs.
		 */
		if ((comment & ~hook->mask) == hook->imm)
			fn = hook->fn;
	}

	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) static int brk_handler(unsigned long unused, unsigned int esr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		       struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		send_user_sigtrap(TRAP_BRKPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		pr_warn("Unexpected kernel BRK exception at EL1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) NOKPROBE_SYMBOL(brk_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
/*
 * Recognise the AArch32 (compat) software breakpoint encodings at the
 * faulting PC and, if one is found, deliver SIGTRAP/TRAP_BRKPT.
 * Returns 0 when handled, -EFAULT when this was not a compat breakpoint.
 */
int aarch32_break_handler(struct pt_regs *regs)
{
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		__le16 instr;
		/*
		 * NOTE(review): get_user() return values are ignored
		 * throughout — presumably a faulting read leaves instr
		 * zeroed so the comparisons below simply fail; confirm
		 * against the arch uaccess semantics.
		 */
		get_user(instr, (__le16 __user *)pc);
		thumb_instr = le16_to_cpu(instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(instr, (__le16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr;
		get_user(instr, (__le32 __user *)pc);
		arm_instr = le32_to_cpu(instr);
		/* The condition field (top nibble) is ignored when matching. */
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

	send_user_sigtrap(TRAP_BRKPT);
	return 0;
}
NOKPROBE_SYMBOL(aarch32_break_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
/* Hook the debug-fault vectors for single-step and BRK exceptions. */
void __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_TRACE, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "BRK handler");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 
/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 * (i.e. rewind the step state to active-not-pending so the next
	 * instruction after the exception return is stepped again.)
	 */
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 
/* Skip the pending step: clear SPSR.SS so no step trap fires on return. */
void user_fastforward_single_step(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
/* Re-derive SPSR.SS in @regs from the task's TIF_SINGLESTEP flag. */
void user_regs_reset_single_step(struct user_pt_regs *regs,
				 struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_user_regs_spsr_ss(regs);
	else
		clear_user_regs_spsr_ss(regs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
/* Kernel API */

/*
 * Arm single-step for the exception return to @regs: set SPSR.SS, set
 * MDSCR_EL1.SS, and take an EL1 debug-monitor reference.
 * Must be called with interrupts disabled.
 */
void kernel_enable_single_step(struct pt_regs *regs)
{
	WARN_ON(!irqs_disabled());
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
/* Tear down kernel single-step: clear MDSCR_EL1.SS and drop the EL1 ref. */
void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
/* Non-zero when MDSCR_EL1.SS is currently set on this CPU. */
int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
/* ptrace API */

/* Begin single-stepping @task: set TIF_SINGLESTEP and arm SPSR.SS. */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	/* Only the first enabler touches the saved pstate. */
	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_enable_single_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
/* Stop single-stepping @task; the saved SPSR.SS bit is left unchanged. */
void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
NOKPROBE_SYMBOL(user_disable_single_step);