// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

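/* One klp_ops per patched function, each holding a stack of klp_funcs */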
static LIST_HEAD(klp_ops);

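/*
 * Find the klp_ops struct that manages the given function. Checking only
 * the first entry of each func_stack is sufficient: every klp_func on a
 * given stack patches the same old_func.
 */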
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

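/*
 * The ftrace handler registered for each patched function. It runs on
 * every call of the original function and redirects execution to the
 * newest klp_func on the ops' func_stack, consulting the calling task's
 * patch state while a transition is in progress.
 */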
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
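
/*
 * As an example of such an override (a sketch based on powerpc with
 * -mprofile-kernel, not part of this file), the arch header can search
 * the first few bytes of the function for the actual ftrace call site:
 *
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */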
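/*
 * Remove the func from its ops' func_stack. If it was the only entry,
 * also unregister the ftrace handler, clear the filter and free the ops.
 */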
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

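/*
 * Redirect the old function to this func's new implementation. The first
 * patch of a given function allocates a klp_ops and hooks the function's
 * ftrace location; subsequent patches just push onto the existing
 * func_stack, whose head is the active version.
 */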
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

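		/*
		 * SAVE_REGS lets the handler modify regs via
		 * klp_arch_set_pc(); DYNAMIC marks this ops as heap
		 * allocated; IPMODIFY claims the exclusive right to change
		 * the instruction pointer for this function; PERMANENT
		 * keeps the handler active even when ftrace is disabled
		 * via the ftrace_enabled sysctl.
		 */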
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

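/*
 * Unpatch all patched functions in the object, or only the dynamically
 * allocated NOP functions when nops_only is set.
 */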
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

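/*
 * Patch all functions in the object, rolling back the already-patched
 * ones if any of them fails.
 */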
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

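/*
 * Remove only the dynamically allocated NOP functions, e.g. once they
 * are no longer needed after an atomic replace transition.
 */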
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}