// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

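/*
 * Rough sketch of how core.c drives a transition when enabling a patch
 * (for orientation only; the authoritative sequence lives in
 * __klp_enable_patch() in core.c):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_pre_patch_callback(obj);	(for each loaded object)
 *	klp_patch_object(obj);		(for each loaded object)
 *	klp_start_transition();
 *	klp_try_complete_transition();
 *
 * Disabling uses the same entry points with KLP_UNPATCHED.  Tasks which
 * cannot be switched right away are retried later by klp_transition_work_fn().
 */
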
#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128

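/*
 * klp_try_complete_transition() re-arms klp_transition_work roughly once per
 * second while tasks remain unswitched, so the stragglers are signaled about
 * every SIGNALS_TIMEOUT (15) seconds.
 */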
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

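/*
 * Number of times klp_try_complete_transition() has found tasks that could
 * not yet be switched.  Reset in klp_start_transition().
 */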
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard forced variant of
 * synchronize_rcu(): scheduling it on every CPU synchronizes with tasks
 * even when they are running in userspace or idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching, e.g. before
 * user_exit().  We cannot rely on the RCU infrastructure to do the
 * synchronization there.  Instead, hard force a scheduler-based
 * synchronization by running (and waiting for) the empty klp_sync() work
 * on every CPU.
 *
 * This approach allows RCU list primitives to be used safely when
 * manipulating func_stack.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

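	/*
	 * Every task is now in the final state; reset patch_state to
	 * KLP_UNDEFINED so that a future transition can start cleanly.
	 */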
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

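	/* Run the post-patch/post-unpatch callbacks for all loaded objects. */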
	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the task's patch state, i.e. make it start using the set of
 * functions associated with the target patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

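		/*
		 * The task is sleeping inside (or will return through) this
		 * function, so it cannot be switched yet.
		 */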
		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

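	/* Functions of unpatched objects are not redirected; nothing to check. */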
	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could have switched its patch state on
		 * its own, making the action unnecessary.  That is harmless,
		 * though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which is sleeping interruptibly
			 * and has not yet been switched over.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to any non-kthread task which
			 * has not yet been switched over.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}

	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

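	/*
	 * Nudge the remaining tasks with a fake signal (or a wakeup for
	 * kthreads) every SIGNALS_TIMEOUT failed attempts.
	 */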
	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks could not be switched over.  Try again later
		 * and/or wait for other switching methods such as the
		 * kernel-exit and idle-loop hooks.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition(), but it is also called from
	 * klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}


/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

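	/* Restart the fake-signal counter for this transition. */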
	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

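	/*
	 * Flip both the patch's enabled flag and the target state.
	 * KLP_PATCHED is 1 and KLP_UNPATCHED is 0, so logical negation
	 * toggles between them.
	 */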
	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on the administrator's request.  This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

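	/*
	 * Mark all patches as forced: the consistency model may have been
	 * broken, so the module reference of a forced patch is never dropped
	 * and its module can no longer be removed.
	 */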
	klp_for_each_patch(patch)
		patch->forced = true;
}