// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu;
	bool ongoing;
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}
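
/*
 * Editor's sketch (not kernel API documentation) of the intended usage
 * of the two helpers above: an unregister-side transition records the
 * current grace-period state, and a later register-side transition only
 * pays for a full synchronize if that grace period has not yet elapsed:
 *
 *	tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);	at the 1->0 step
 *	...
 *	tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);	before the next 0->1
 *
 * Both calls run under tracepoints_mutex, which serializes all updates
 * to tp_transition_snapshot[].
 */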

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the freeing of multiple probe arrays until a
 * quiescent state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Stub installed when a probe is removed but allocating a new tp_funcs fails */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU.
		 * By calling the SRCU callback from within the sched RCU
		 * callback we cover both cases, so chain the SRCU and sched
		 * RCU callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}
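
/*
 * Editor's sketch of the callback chain that release_probes() sets up
 * once ok_to_free_tracepoints is true:
 *
 *	call_rcu(&tp_probes->rcu, rcu_free_old_probes)
 *	    ... sched RCU grace period elapses ...
 *	rcu_free_old_probes()
 *	    -> call_srcu(&tracepoint_srcu, head, srcu_free_old_probes)
 *	    ... SRCU grace period elapses ...
 *	srcu_free_old_probes()
 *	    -> kfree(struct tp_probes)
 *
 * The array is thus freed only after readers under both protection
 * schemes are guaranteed to be done with it.
 */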

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int stub_funcs = 0;
	int pos = -1;

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			if (old[nr_probes].func == tp_stub_func)
				stub_funcs++;
		}
	}
	/* +2: one for the new probe, one for the NULL terminator; minus the stubs */
	new = allocate_probes(nr_probes + 2 - stub_funcs);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (stub_funcs) {
			/* Need to copy one at a time to remove stubs */
			int probes = 0;

			pos = -1;
			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
				if (old[nr_probes].func == tp_stub_func)
					continue;
				if (pos < 0 && old[nr_probes].prio < prio)
					pos = probes++;
				new[probes++] = old[nr_probes];
			}
			nr_probes = probes;
			if (pos < 0)
				pos = probes;
			else
				nr_probes--;	/* Account for insertion */

		} else if (pos < 0) {
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else {
		pos = 0;
	}
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
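
/*
 * Worked example for func_add() (editor's sketch; the probe names are
 * hypothetical). Arrays are kept sorted by descending priority, and
 * insertion happens before the first entry of lower priority, so
 * equal-priority probes keep their registration order. Inserting
 * {probe_c, prio = 5}:
 *
 *	old: [ {probe_a, 10}, {probe_b, 1}, NULL ]
 *	new: [ {probe_a, 10}, {probe_c, 5}, {probe_b, 1}, NULL ]
 *
 * Here pos = 1, the index of {probe_b, 1}.
 */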

static void *func_remove(struct tracepoint_func **funcs,
			 struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++)
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++)
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data) {
					old[i].func = tp_stub_func;
					/* Set the prio to the next event. */
					if (old[i + 1].func)
						old[i].prio = old[i + 1].prio;
					else
						old[i].prio = -1;
				}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
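
/*
 * Editor's note: after tracepoint_update_call() the static call site is
 * in one of two states. With exactly one probe registered, __DO_TRACE()
 * jumps directly to that probe; otherwise (including zero probes) it
 * jumps to the __traceiter_*() loop, which walks tp->funcs:
 *
 *	TP_FUNC_1: static call --> tp_funcs[0].func
 *	otherwise: static call --> tp->iterator --> each tp_funcs[i].func
 */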

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer() has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting for an RCU grace period after setting elem->call to the
 * empty function ensures that the original callback is not used anymore. This
 * is ensured by the preempt_disable() around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
				  struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}
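
/*
 * Editor's summary of the transition handling above and in
 * tracepoint_add_func() (the ordering within each step matters):
 *
 *	0->1: cond-sync 1_0_1, static call to probe, publish funcs, enable key
 *	1->2: static call to iterator, then publish funcs
 *	2->1: publish funcs, sync N_2_1 if data changed, static call to probe
 *	1->0: disable key, static call to iterator, funcs = NULL, snapshot 1_0_1
 */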

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
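
/*
 * Example usage (editor's sketch; "foo" and my_probe() are hypothetical).
 * A probe receives the @data cookie it was registered with as its first
 * argument, followed by the tracepoint's own arguments:
 *
 *	static void my_probe(void *data, int arg)	// matches foo's proto
 *	{
 *		...
 *	}
 *
 *	ret = tracepoint_probe_register(&__tracepoint_foo,
 *					(void *)my_probe, NULL);
 *	...
 *	tracepoint_probe_unregister(&__tracepoint_foo,
 *				    (void *)my_probe, NULL);
 */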

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
				     MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
						     MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * The going notifier was called before checking for
			 * quiescence, giving tracers a chance to unregister
			 * their probes first.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
				void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
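
/*
 * Example callback (editor's sketch; match_tp() and struct tp_lookup are
 * hypothetical): finding a core kernel tracepoint by name.
 *
 *	struct tp_lookup {
 *		const char *name;
 *		struct tracepoint *tp;
 *	};
 *
 *	static void match_tp(struct tracepoint *tp, void *priv)
 *	{
 *		struct tp_lookup *lookup = priv;
 *
 *		if (!strcmp(tp->name, lookup->name))
 *			lookup->tp = tp;
 *	}
 *
 *	for_each_kernel_tracepoint(match_tp, &lookup);
 */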

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif

#ifdef CONFIG_ANDROID_VENDOR_HOOKS

static void *rvh_zalloc_funcs(int count)
{
	return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
}

#define ANDROID_RVH_NR_PROBES_MAX	2
static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
{
	int i;

	if (!static_key_enabled(&tp->key)) {
		/* '+ 1' for the last NULL element */
		tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
		if (!tp->funcs)
			return -ENOMEM;
	}

	for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
		if (!tp->funcs[i].func) {
			if (!static_key_enabled(&tp->key))
				tp->funcs[i].data = func->data;
			WRITE_ONCE(tp->funcs[i].func, func->func);

			return 0;
		}
	}

	return -EBUSY;
}

static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
{
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	ret = rvh_func_add(tp, func);
	if (ret)
		return ret;
	tracepoint_update_call(tp, tp->funcs);
	static_key_enable(&tp->key);

	return 0;
}

int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	/*
	 * Once the static key has been flipped, the array may be read
	 * concurrently. Although __traceiter_*() always checks .func first,
	 * it doesn't enforce read->read dependencies, and we can't strongly
	 * guarantee it will see the correct .data for the second element
	 * without adding smp_load_acquire() in the fast path. But this is a
	 * corner case which is unlikely to be needed by anybody in practice,
	 * so let's just forbid it and keep the fast path clean.
	 */
	if (WARN_ON(static_key_enabled(&tp->key) && data))
		return -EINVAL;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = android_rvh_add_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(android_rvh_probe_register);
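
/*
 * Example usage (editor's sketch; the hook name and probe are
 * hypothetical). Restricted vendor hooks are never unregistered, accept
 * at most ANDROID_RVH_NR_PROBES_MAX probes, and only probes attached
 * before the static key is enabled may carry a @data cookie:
 *
 *	ret = android_rvh_probe_register(&__tracepoint_android_rvh_foo,
 *					 (void *)my_rvh_probe, NULL);
 */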
#endif