// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

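/*
 * One block of scratch perf_callchain_entry slots per CPU (one slot per
 * recursion context). Freed via RCU so that NMI-context users inside an
 * RCU read-side section never see the buffers vanish underneath them.
 */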
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

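/*
 * Size of one scratch entry: the fixed header plus one __u64 slot for
 * every stack frame and every PERF_CONTEXT_* marker allowed by the
 * sysctl caps.
 */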
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

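/*
 * Weak stubs: architectures with callchain support override these with
 * their real kernel and user stack unwinders.
 */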
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

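/* RCU callback: all readers are gone, free the per-CPU buffers. */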
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

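/*
 * Unpublish the buffers and defer the actual freeing until after a grace
 * period. Must be called with callchain_mutex held.
 */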
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

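/*
 * Allocate the per-CPU buffers, NUMA-local to each CPU. Called with
 * callchain_mutex held, when the first callchain event is created.
 */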
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

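/**
 * get_callchain_buffers - take a reference on the callchain buffers
 * @event_max_stack: maximum stack depth this event may request
 *
 * Allocates the per-CPU buffers on the 0 -> 1 transition. Every
 * successful call must be paired with put_callchain_buffers().
 *
 * Roughly how event creation pairs these (sketch):
 *
 *	err = get_callchain_buffers(attr->sample_max_stack);
 *	if (err)
 *		return err;
 *	...
 *	put_callchain_buffers();	// on event teardown
 *
 * Returns 0 on success, -EOVERFLOW if @event_max_stack exceeds
 * sysctl_perf_event_max_stack, or -ENOMEM if allocation fails.
 */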
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If the event requests more than the global cap, return a
	 * distinct error so userspace can tell the two cases apart.
	 *
	 * Do it here so that &callchain_mutex is held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

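/**
 * put_callchain_buffers - drop a reference on the callchain buffers
 *
 * Schedules the buffers for freeing (via RCU) when the last callchain
 * event goes away. atomic_dec_and_mutex_lock() only takes
 * callchain_mutex on the 1 -> 0 transition, keeping the common path
 * lock-free.
 */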
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

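/**
 * get_callchain_entry - claim this CPU's scratch entry for unwinding
 * @rctx: out parameter, the recursion context that was claimed
 *
 * Returns NULL if an unwind is already in progress in this context on
 * this CPU, or if the buffers are not allocated. On success the caller
 * must release the context with put_callchain_entry(*rctx).
 */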
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries) {
		put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
		return NULL;
	}

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

void put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

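/**
 * get_perf_callchain - unwind and record a callchain into a scratch entry
 * @regs:      register state at the sample point
 * @init_nr:   number of entries already counted in the buffer (0 for a
 *             fresh unwind)
 * @kernel:    capture the kernel-side callchain
 * @user:      capture the user-side callchain
 * @max_stack: per-event stack depth limit
 * @crosstask: the event monitors a task other than current, whose user
 *             stack cannot be walked safely from here
 * @add_mark:  store PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers ahead
 *             of the respective frames
 *
 * A typical sampling call (sketch): get_perf_callchain(regs, 0, kernel,
 * user, event->attr.sample_max_stack, crosstask, true).
 *
 * Returns the filled entry, or NULL if recursion was detected or the
 * buffers are not allocated.
 */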
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (!entry)
		return NULL;

	ctx.entry = entry;
	ctx.max_stack = max_stack;
	ctx.nr = entry->nr = init_nr;
	ctx.contexts = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			/* The sample hit kernel code: unwind from the task's saved user regs. */
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;	/* kernel thread, no user stack */
		}

		if (regs) {
			mm_segment_t fs;

			/* Walking a remote task's user stack is not supported. */
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			/*
			 * Bound user accesses to the user address range even
			 * if we interrupted a set_fs(KERNEL_DS) section.
			 */
			fs = force_uaccess_begin();
			perf_callchain_user(&ctx, regs);
			force_uaccess_end(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Handler for the kernel.perf_event_max_stack and
 * kernel.perf_event_max_contexts_per_stack sysctls. The buffers are sized
 * from these values when they are allocated, so changes are refused with
 * -EBUSY while any callchain event exists.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}