// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

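/* Entry types this selftest knows how to validate */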
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

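/*
 * Consume every event queued on one CPU's ring buffer, checking that
 * each entry has a known type. Returns 0 on success, -1 (after
 * disabling tracing) if the buffer looks corrupted.
 */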
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and constantly filling the
	 * buffer, that loop would run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

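/*
 * Each test probe below simply counts how many times it is called.
 * The tests register the probes with different filters, call the
 * traced stub functions, and then compare the counters against the
 * expected hit counts.
 */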
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

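/*
 * The probes only increment a counter, so it is safe to mark them
 * FTRACE_OPS_FL_RECURSION_SAFE: ftrace will not wrap these callbacks
 * in its own recursion protection.
 */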
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

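/*
 * Register the three counting probes (plus a dynamically allocated
 * one part way through), trigger the two stub functions, and check
 * that every probe fired exactly as often as its filter allows.
 * cnt > 1 means global tracing is off, so the array ops must be
 * registered here as well.
 */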
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/*
	 * On the first pass (cnt == 1) global tracing is still running;
	 * for later passes, register the array ops to stand in for it.
	 */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

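/*
 * The recursion tests: the first probe relies on ftrace to stop
 * recursion, the second claims to provide its own protection and is
 * expected to recurse exactly once.
 */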
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all ftrace features and nothing else
	 * is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

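/* Deliberately no RECURSION_SAFE flag: ftrace must protect this one */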
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

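/*
 * The regs test records whether the callback actually received a
 * pt_regs pointer, then checks the result against what the arch
 * claims to support.
 */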
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

/*
 * Pretty much the same as for the function tracer from which the
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
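/*
 * Run the irqsoff tracer, create a ~100us irqs-off window with
 * udelay(), and verify that the max latency buffer captured it.
 */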
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables both the tr and max buffers,
	 * which makes flipping impossible if a maximum irqs-off latency
	 * is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
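/*
 * Selftest for the preemptoff tracer: run a short, known
 * preemption-off section and verify that it was traced.
 */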
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables both the tr and max buffers,
	 * which makes flipping impossible if a maximum preempt-off
	 * latency is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
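/*
 * Selftest for the preemptirqsoff tracer: run sections with both
 * preemption and interrupts disabled, in two passes, and verify that
 * the combined critical section shows up in the max-latency buffer.
 */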
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables both the tr and max buffers,
	 * which makes flipping impossible if a maximum irqs/preempt-off
	 * latency is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* run the same sequence a second time, with tracing restarted */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
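/*
 * The nop tracer does not trace anything, so there is nothing to
 * verify here; succeed unconditionally.
 */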
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

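/*
 * The wakeup tracer measures the latency between waking the
 * highest-priority task on the system and that task actually running.
 * The test spawns a SCHED_DEADLINE kthread (which outranks any other
 * scheduling class), wakes it, and checks that the wakeup latency was
 * captured. @is_ready handshakes the two sides; @go tells the kthread
 * the test is ready for it to run.
 */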
struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
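	/*
	 * Values are in nanoseconds: 100us of runtime every 10ms
	 * period, i.e. roughly 1% CPU bandwidth.
	 */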
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we are now running with the new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
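
/*
 * Selftest for the wakeup tracer: wake the -deadline kthread created
 * above and verify that the wakeup latency was recorded in both the
 * live buffer and the max-latency snapshot buffer.
 */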
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		/* don't leak the test thread on the error path */
		kthread_stop(p);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
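/*
 * Selftest for the branch tracer: let it record likely()/unlikely()
 * branch outcomes for a short while and verify that entries were
 * produced.
 */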
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */