/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

/*
 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void *parm;
	const char *name;
	u16 nr_entries; // for arrays
	bool show_zero;
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8 nr_args;
	bool errpid;
	bool timeout;
	bool hexret;
};

struct trace {
	struct perf_tool tool;
	struct syscalltbl *sctbl;
	struct {
		struct syscall *table;
		struct bpf_map *map;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map *sys_enter,
				       *sys_exit;
		} prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		} events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct {
		struct bpf_map *map;
	} dump;
	struct record_opts opts;
	struct evlist *evlist;
	struct machine *host;
	struct thread *current;
	struct bpf_object *bpf_obj;
	struct cgroup *cgroup;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	unsigned long nr_events_printed;
	unsigned long max_events;
	struct evswitch evswitch;
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
		struct bpf_map *map;
	} filter_pids;
	double duration_filter;
	double runtime_ms;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	int raw_augmented_syscalls_args_size;
	bool raw_augmented_syscalls;
	bool fd_path_disabled;
	bool sort_events;
	bool not_ev_qualifier;
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool errno_summary;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool libtraceevent_print;
	bool kernel_syscallchains;
	s16 args_alignment;
	bool show_tstamp;
	bool show_duration;
	bool show_zeros;
	bool show_arg_names;
	bool show_string_prefix;
	bool force;
	bool vfs_getname;
	int trace_pgfaults;
	char *perfconfig_events;
	struct {
		struct ordered_events data;
		u64 last;
	} oe;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}
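
/*
 * The TP_UINT_FIELD() instantiations below generate tp_field__u8/u16/u32/u64,
 * each reading an unsigned integer of that width from sample->raw_data at
 * field->offset; memcpy() keeps the read safe for unaligned offsets.
 */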

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
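
/*
 * In the union above, 'args' is used for sys_enter payloads (a pointer to the
 * raw argument block that follows the syscall id) and 'ret' for sys_exit
 * payloads (the return value); an evsel decodes only one of the two, so they
 * can share storage.
 */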

/*
 * The evsel->priv as used by 'perf trace'
 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 * fmt: for all the other tracepoints
 */
struct evsel_trace {
	struct syscall_tp sc;
	struct syscall_arg_fmt *fmt;
};

static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}

static void evsel_trace__delete(struct evsel_trace *et)
{
	if (et == NULL)
		return;

	zfree(&et->fmt);
	free(et);
}

/*
 * Used with raw_syscalls:sys_{enter,exit} and with the
 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 */
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return &et->sc;
}

static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
	if (evsel->priv == NULL) {
		evsel->priv = evsel_trace__new();
		if (evsel->priv == NULL)
			return NULL;
	}

	return __evsel__syscall_tp(evsel);
}

/*
 * Used with all the other tracepoints.
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
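
/*
 * Example: perf_evsel__init_sc_tp_uint_field(evsel, id) stringifies 'id',
 * looks up the tracepoint format field of that name and wires sc->id to the
 * accessor with the right width and byte order.
 */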

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;
		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
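
/*
 * Both helpers above assume the augmented record places its payload (the
 * copied syscall arguments for sys_enter, the u64 return value for sys_exit)
 * one u64 past the start of the syscall id field, hence sc->id.offset +
 * sizeof(u64).
 */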

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })
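
/*
 * Typical use: perf_evsel__sc_tp_uint(evsel, id, sample) fetches the syscall
 * number from a sys_enter/sys_exit sample, while perf_evsel__sc_tp_ptr(evsel,
 * args, sample) returns a pointer to the raw argument block.
 */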

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
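
/*
 * Example with the "itimers" strarray defined further down: val 1 prints
 * "ITIMER_VIRTUAL" when show_prefix is set ("VIRTUAL" otherwise), while an
 * out-of-range val falls back to intfmt plus an "ITIMER_???" annotation.
 */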

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sa->nr_entries; ++i) {
		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
			*ret = sa->offset + i;
			return true;
		}
	}

	return false;
}

bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	u64 val = 0;
	char *tok = bf, *sep, *end;

	*ret = 0;

	while (size != 0) {
		int toklen = size;

		sep = memchr(tok, '|', size);
		if (sep != NULL) {
			size -= sep - tok + 1;

			end = sep - 1;
			while (end > tok && isspace(*end))
				--end;

			toklen = end - tok + 1;
		}

		while (isspace(*tok))
			++tok;

		if (isalpha(*tok) || *tok == '_') {
			if (!strarray__strtoul(sa, tok, toklen, &val))
				return false;
		} else {
			bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');

			val = strtoul(tok, NULL, is_hexa ? 16 : 0);
		}

		*ret |= (1 << (val - 1));

		if (sep == NULL)
			break;
		tok = sep + 1;
	}

	return true;
}
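
/*
 * Reverse direction of strarray__scnprintf_flags(): split the string on '|',
 * resolve each token via strarray__strtoul() (or strtoul() for numeric
 * tokens) and OR 1 << (val - 1) into *ret, turning symbolic flag names back
 * into a bitmask.
 */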

bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];

		if (strarray__strtoul(sa, bf, size, ret))
			return true;
	}

	return false;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	//     fill missing comms using thread__set_comm()...
	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
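
/*
 * EPOLL_CTL_ADD is 1, so this strarray starts at offset 1;
 * strarray__scnprintf() indexes it with val - offset.
 */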

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);
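
/*
 * fcntl(2) commands come in two disjoint ranges: the classic F_* values
 * starting at 0 and the Linux-specific ones starting at F_LINUX_SPECIFIC_BASE
 * (1024); strarrays__scnprintf() picks whichever strarray covers the value.
 */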
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static const char *rlimit_resources[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) "RTTIME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static DEFINE_STRARRAY(sighow, "SIG_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static const char *clockid[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static DEFINE_STRARRAY(clockid, "CLOCK_");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) bool show_prefix = arg->show_string_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) const char *suffix = "_OK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int mode = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (mode == F_OK) /* 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) #define P_MODE(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (mode & n##_OK) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) mode &= ~n##_OK; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) P_MODE(R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) P_MODE(W);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) P_MODE(X);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) #undef P_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
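
/*
 * For illustration: R_OK|W_OK is rendered as "RW" (or "R_OKW_OK" when
 * arg->show_string_prefix is set), as P_MODE() does not insert a '|' between
 * the known bits, and anything left over is appended in hex, e.g. "RW|0x80".
 */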
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct syscall_arg *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) #define SCA_FILENAME syscall_arg__scnprintf_filename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) bool show_prefix = arg->show_string_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) const char *prefix = "O_";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int printed = 0, flags = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #define P_FLAG(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (flags & O_##n) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) flags &= ~O_##n; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) P_FLAG(CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) P_FLAG(NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) #undef P_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) #ifndef GRND_NONBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) #define GRND_NONBLOCK 0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) #ifndef GRND_RANDOM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #define GRND_RANDOM 0x0002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bool show_prefix = arg->show_string_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) const char *prefix = "GRND_";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int printed = 0, flags = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) #define P_FLAG(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (flags & GRND_##n) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) flags &= ~GRND_##n; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) P_FLAG(RANDOM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) P_FLAG(NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #undef P_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) #define STRARRAY(name, array) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) { .scnprintf = SCA_STRARRAY, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) .strtoul = STUL_STRARRAY, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) .parm = &strarray__##array, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) #define STRARRAY_FLAGS(name, array) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) { .scnprintf = SCA_STRARRAY_FLAGS, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) .strtoul = STUL_STRARRAY_FLAGS, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) .parm = &strarray__##array, }
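
/*
 * Roughly: STRARRAY() wires an argument to a plain table lookup that prints a
 * single value by name (e.g. a clockid as "MONOTONIC"), while STRARRAY_FLAGS()
 * treats the value as a bitmask and prints the OR of the matching names.
 */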
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #include "trace/beauty/arch_errno_names.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) #include "trace/beauty/eventfd.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) #include "trace/beauty/futex_op.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) #include "trace/beauty/futex_val3.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) #include "trace/beauty/mmap.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #include "trace/beauty/mode_t.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) #include "trace/beauty/msg_flags.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) #include "trace/beauty/open_flags.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #include "trace/beauty/perf_event_open.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #include "trace/beauty/pid.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) #include "trace/beauty/sched_policy.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #include "trace/beauty/seccomp.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) #include "trace/beauty/signum.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) #include "trace/beauty/socket_type.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) #include "trace/beauty/waitid_options.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
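/*
 * Note: syscall_fmt__find() below looks entries up with bsearch() keyed on
 * ->name, so this table must be kept sorted alphabetically by name.
 */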
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static struct syscall_fmt syscall_fmts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) { .name = "access",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) { .name = "arch_prctl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) { .name = "bind",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) { .name = "bpf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) { .name = "brk", .hexret = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) { .name = "clock_gettime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .arg = { [0] = STRARRAY(clk_id, clockid), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) { .name = "clone", .errpid = true, .nr_args = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) { .name = "close",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) { .name = "connect",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) { .name = "epoll_ctl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) { .name = "eventfd2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) { .name = "fchmodat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) { .name = "fchownat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) { .name = "fcntl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .strtoul = STUL_STRARRAYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .parm = &strarrays__fcntl_cmds_arrays,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .show_zero = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) { .name = "flock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) { .name = "fsconfig",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) { .name = "fsmount",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) { .name = "fspick",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) [1] = { .scnprintf = SCA_FILENAME, /* path */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) { .name = "fstat", .alias = "newfstat", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) { .name = "fstatat", .alias = "newfstatat", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) { .name = "futex",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) { .name = "futimesat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) { .name = "getitimer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .arg = { [0] = STRARRAY(which, itimers), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) { .name = "getpid", .errpid = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) { .name = "getpgid", .errpid = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) { .name = "getppid", .errpid = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) { .name = "getrandom",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) { .name = "getrlimit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) { .name = "gettid", .errpid = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) { .name = "ioctl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #if defined(__i386__) || defined(__x86_64__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * FIXME: Make this available to all arches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) { .name = "kcmp", .nr_args = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) [1] = { .name = "pid2", .scnprintf = SCA_PID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) { .name = "keyctl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .arg = { [0] = STRARRAY(option, keyctl_options), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) { .name = "kill",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) { .name = "linkat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) { .name = "lseek",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .arg = { [2] = STRARRAY(whence, whences), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) { .name = "lstat", .alias = "newlstat", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) { .name = "madvise",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) { .name = "mkdirat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) { .name = "mknodat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) { .name = "mmap", .hexret = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* The standard mmap maps to old_mmap on s390x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #if defined(__s390x__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) .alias = "old_mmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .strtoul = STUL_STRARRAY_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .parm = &strarray__mmap_flags, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) { .name = "mount",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) { .name = "move_mount",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) { .name = "mprotect",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) { .name = "mq_unlink",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) { .name = "mremap", .hexret = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) { .name = "name_to_handle_at",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) { .name = "newfstatat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) { .name = "open",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) { .name = "open_by_handle_at",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) { .name = "openat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) { .name = "perf_event_open",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) [3] = { .scnprintf = SCA_FD, /* group_fd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) { .name = "pipe2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) { .name = "pkey_alloc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) { .name = "pkey_free",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) { .name = "pkey_mprotect",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) { .name = "poll", .timeout = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) { .name = "ppoll", .timeout = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) { .name = "prctl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .strtoul = STUL_STRARRAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) .parm = &strarray__prctl_options, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) { .name = "pread", .alias = "pread64", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) { .name = "preadv", .alias = "pread", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) { .name = "prlimit64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) { .name = "pwrite", .alias = "pwrite64", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) { .name = "readlinkat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) { .name = "recvfrom",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) { .name = "recvmmsg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) { .name = "recvmsg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) { .name = "renameat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) { .name = "renameat2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) { .name = "rt_sigaction",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) { .name = "rt_sigprocmask",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) .arg = { [0] = STRARRAY(how, sighow), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) { .name = "rt_sigqueueinfo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) { .name = "rt_tgsigqueueinfo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) { .name = "sched_setscheduler",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) { .name = "seccomp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) { .name = "select", .timeout = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) { .name = "sendfile", .alias = "sendfile64", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) { .name = "sendmmsg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) { .name = "sendmsg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) { .name = "sendto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) { .name = "set_tid_address", .errpid = true, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) { .name = "setitimer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .arg = { [0] = STRARRAY(which, itimers), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) { .name = "setrlimit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) { .name = "socket",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .arg = { [0] = STRARRAY(family, socket_families),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) { .name = "socketpair",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .arg = { [0] = STRARRAY(family, socket_families),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) { .name = "stat", .alias = "newstat", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) { .name = "statx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) { .name = "swapoff",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) { .name = "swapon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) { .name = "symlinkat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) { .name = "sync_file_range",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) { .name = "tgkill",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) { .name = "tkill",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) { .name = "umount2", .alias = "umount",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) { .name = "uname", .alias = "newuname", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) { .name = "unlinkat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) { .name = "utimensat",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) { .name = "wait4", .errpid = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) { .name = "waitid", .errpid = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static int syscall_fmt__cmp(const void *name, const void *fmtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) const struct syscall_fmt *fmt = fmtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return strcmp(name, fmt->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static struct syscall_fmt *syscall_fmt__find(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) const int nmemb = ARRAY_SIZE(syscall_fmts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return __syscall_fmt__find(syscall_fmts, nmemb, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
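
/*
 * Usage sketch: syscall_fmt__find("open") returns the entry above that sets
 * SCA_OPEN_FLAGS for arg 1, while a name that is not in the table returns NULL.
 */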
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) for (i = 0; i < nmemb; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return &fmts[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) const int nmemb = ARRAY_SIZE(syscall_fmts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * is_exit: is this "exit" or "exit_group"?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * args_size: sum of the sizes of the syscall arguments, anything after that is augmented payload: the pathname for openat, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * nonexistent: just a hole in the syscall table, i.e. a syscall id that is not allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct syscall {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct tep_event *tp_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int args_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct bpf_program *sys_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) } bpf_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) bool is_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) bool is_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) bool nonexistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct tep_format_field *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct syscall_fmt *fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct syscall_arg_fmt *arg_fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * Must match what is in the BPF program:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * tools/perf/examples/bpf/augmented_raw_syscalls.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct bpf_map_syscall_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) bool enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) u16 string_args_len[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * We need to have this 'calculated' boolean because in some cases we really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * don't know what the duration of a syscall is, for instance, when we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * a session and some threads are waiting for a syscall to finish, say 'poll',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * in which case all we can do is to print "( ? )" for the duration and for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * start timestamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) double duration = (double)t / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) size_t printed = fprintf(fp, "(");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!calculated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) printed += fprintf(fp, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) else if (duration >= 1.0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) else if (duration >= 0.01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return printed + fprintf(fp, "): ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
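
/*
 * Illustrative output: a 1.5ms syscall prints "( 1.500 ms): " in red, a 0.02ms
 * one in yellow, anything faster in the normal color, and a duration we could
 * not calculate collapses to "( ): ".
 */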
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * filename.ptr: The filename char pointer that will be vfs_getname'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * filename.entry_str_pos: Where to insert the string translated from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * filename.ptr by the vfs_getname tracepoint/kprobe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * ret_scnprintf: syscall args may set this to a different syscall return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * formatter, for instance, fcntl may return fds, file flags, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct thread_trace {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) u64 entry_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) bool entry_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) unsigned long nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) unsigned long pfmaj, pfmin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) char *entry_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) double runtime_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) short int entry_str_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) bool pending_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) unsigned int namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) } filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) int max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct file *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) } files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct intlist *syscall_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static struct thread_trace *thread_trace__new(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (ttrace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ttrace->files.max = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ttrace->syscall_stats = intlist__new(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (thread == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (thread__priv(thread) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) thread__set_priv(thread, thread_trace__new());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (thread__priv(thread) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ++ttrace->nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) color_fprintf(fp, PERF_COLOR_RED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) "WARNING: not enough memory, dropping samples!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct thread_trace *ttrace = thread__priv(arg->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ttrace->ret_scnprintf = ret_scnprintf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #define TRACE_PFMAJ (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #define TRACE_PFMIN (1 << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static const size_t trace__entry_str_size = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (fd > ttrace->files.max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (nfiles == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (ttrace->files.max != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) memset(nfiles + ttrace->files.max + 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) (fd - ttrace->files.max) * sizeof(struct file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) memset(nfiles, 0, (fd + 1) * sizeof(struct file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ttrace->files.table = nfiles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) ttrace->files.max = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return ttrace->files.table + fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
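
/*
 * The fd table is a sparse array indexed by fd: asking for fd 100 grows it to
 * 101 zeroed slots in a single realloc(), trading some memory on processes
 * with large, sparse fd numbers for O(1) lookups.
 */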
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct file *thread__files_entry(struct thread *thread, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return thread_trace__files_entry(thread__priv(thread), fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct thread_trace *ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct file *file = thread_trace__files_entry(ttrace, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (file != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct stat st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (stat(pathname, &st) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) file->dev_maj = major(st.st_rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) file->pathname = strdup(pathname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (file->pathname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int thread__read_fd_path(struct thread *thread, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) char linkname[PATH_MAX], pathname[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct stat st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (thread->pid_ == thread->tid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) scnprintf(linkname, sizeof(linkname),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) "/proc/%d/fd/%d", thread->pid_, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) scnprintf(linkname, sizeof(linkname),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ret = readlink(linkname, pathname, sizeof(pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (ret < 0 || ret > st.st_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) pathname[ret] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return trace__set_fd_pathname(thread, fd, pathname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
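
/*
 * e.g. fd 3 of a thread group leader is resolved via readlink("/proc/1234/fd/3"),
 * fd 3 of another thread via readlink("/proc/1234/task/1235/fd/3") (pids and fd
 * purely illustrative).
 */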
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static const char *thread__fd_path(struct thread *thread, int fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct thread_trace *ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (ttrace == NULL || trace->fd_path_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!trace->live)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ++trace->stats.proc_getname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (thread__read_fd_path(thread, fd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return ttrace->files.table[fd].pathname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int fd = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) size_t printed = scnprintf(bf, size, "%d", fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) const char *path = thread__fd_path(arg->thread, fd, arg->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) printed += scnprintf(bf + printed, size - printed, "<%s>", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
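
/*
 * Illustrative output: an fd whose path could be resolved prints as
 * "3</etc/passwd>", otherwise just "3".
 */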
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) size_t printed = scnprintf(bf, size, "%d", fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct thread *thread = machine__find_thread(trace->host, pid, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) const char *path = thread__fd_path(thread, fd, trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) printed += scnprintf(bf + printed, size - printed, "<%s>", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int fd = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct thread_trace *ttrace = thread__priv(arg->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (ttrace && fd >= 0 && fd <= ttrace->files.max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) zfree(&ttrace->files.table[fd].pathname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
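
/*
 * Freeing the cached pathname here means that if this fd number is later
 * reused for a different file we won't keep printing the stale path.
 */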
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static void thread__set_filename_pos(struct thread *thread, const char *bf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) unsigned long ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct thread_trace *ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) ttrace->filename.ptr = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct augmented_arg *augmented_arg = arg->augmented.args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * So that the next arg with a payload can consume its augmented arg, e.g. for the rename* syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * we would have two strings, each prefixed by its size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) int consumed = sizeof(*augmented_arg) + augmented_arg->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) arg->augmented.args = ((void *)arg->augmented.args) + consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) arg->augmented.size -= consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
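
/*
 * Illustrative layout: for renameat(2) arg->augmented.args carries two of
 * these records back to back, one size-prefixed string per pathname, and
 * advancing by sizeof(*augmented_arg) + size steps to the next one.
 */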
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) unsigned long ptr = arg->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (arg->augmented.args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return syscall_arg__scnprintf_augmented_string(arg, bf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!arg->trace->vfs_getname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return scnprintf(bf, size, "%#lx", ptr); /* ptr is an unsigned long, don't truncate it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) thread__set_filename_pos(arg->thread, bf, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static bool trace__filter_duration(struct trace *trace, double t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return t < (trace->duration_filter * NSEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
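
/*
 * Note the units: 't' is in nanoseconds while trace->duration_filter (the
 * --duration option) is in milliseconds, hence the NSEC_PER_MSEC scaling.
 */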
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return fprintf(fp, "%10.3f ", ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * using ttrace->entry_time for a thread that receives a sys_exit without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * first having received a sys_enter ("poll" issued before the tracing session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * started, or a sys_enter lost due to ring buffer overflow).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (tstamp > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return __trace__fprintf_tstamp(trace, tstamp, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return fprintf(fp, " ? ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static bool interrupted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void sig_handler(int sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) interrupted = sig == SIGINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (trace->multiple_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (trace->show_comm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) printed += fprintf(fp, "%d ", thread->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (trace->show_tstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) printed = trace__fprintf_tstamp(trace, tstamp, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (trace->show_duration)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) printed += fprintf_duration(duration, duration_calculated, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return printed + trace__fprintf_comm_tid(trace, thread, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static int trace__process_event(struct trace *trace, struct machine *machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) union perf_event *event, struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) switch (event->header.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) case PERF_RECORD_LOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) color_fprintf(trace->output, PERF_COLOR_RED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) "LOST %" PRIu64 " events!\n", event->lost.lost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ret = machine__process_lost_event(machine, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ret = machine__process_event(machine, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) static int trace__tool_process(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct trace *trace = container_of(tool, struct trace, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return trace__process_event(trace, machine, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) struct machine *machine = vmachine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (machine->kptr_restrict_warned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (symbol_conf.kptr_restrict) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) "Kernel samples will not be resolved.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) machine->kptr_restrict_warned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return machine__resolve_kernel_addr(vmachine, addrp, modp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
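/*
 * Set up symbol resolution: create the host machine object, register the
 * kptr_restrict-aware kernel address resolver above and synthesize events
 * for the already-running threads being traced, so that their maps are
 * available for resolving samples.
 */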
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) int err = symbol__init(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) trace->host = machine__new_host();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (trace->host == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) evlist->core.threads, trace__tool_process, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) symbol__exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static void trace__symbols__exit(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) machine__exit(trace->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) trace->host = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) symbol__exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
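/*
 * Allocate one syscall_arg_fmt per argument, seeding each slot from the
 * hand-written formatter table entry (sc->fmt) when one exists; when called
 * with the default of 6 args (tracepoint format unavailable), prefer the
 * argument count from sc->fmt, if set.
 */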
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) nr_args = sc->fmt->nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (sc->arg_fmt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) for (idx = 0; idx < nr_args; ++idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (sc->fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) sc->arg_fmt[idx] = sc->fmt->arg[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) sc->nr_args = nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) const struct syscall_arg_fmt *fmt = fmtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return strcmp(name, fmt->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static struct syscall_arg_fmt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) __syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
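/*
 * Walk the tracepoint format fields and pick default pretty-printers for
 * arguments that don't already have one, using simple type/name heuristics:
 * "const char *" fields named "*name" or containing "path" are printed as
 * filenames, pointers and "addr" fields as hex, pid_t, umode_t, char arrays
 * and fd-like integers get their dedicated printers, and anything else
 * falls back to a by-name lookup in syscall_arg_fmts__by_name[].
 */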
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static struct tep_format_field *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct tep_format_field *last_field = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) for (; field; field = field->next, ++arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) last_field = field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (arg->scnprintf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) len = strlen(field->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (strcmp(field->type, "const char *") == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) strstr(field->name, "path") != NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) arg->scnprintf = SCA_FILENAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) arg->scnprintf = SCA_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) else if (strcmp(field->type, "pid_t") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) arg->scnprintf = SCA_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) else if (strcmp(field->type, "umode_t") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) arg->scnprintf = SCA_MODE_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) arg->scnprintf = SCA_CHAR_ARRAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) arg->nr_entries = field->arraylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) } else if ((strcmp(field->type, "int") == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) strcmp(field->type, "unsigned int") == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) strcmp(field->type, "long") == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * /sys/kernel/tracing/events/syscalls/sys_enter*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * 65 int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * 23 unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * 7 unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) arg->scnprintf = SCA_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) arg->scnprintf = fmt->scnprintf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) arg->strtoul = fmt->strtoul;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return last_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int syscall__set_arg_fmts(struct syscall *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (last_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) sc->args_size = last_field->offset + last_field->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
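/*
 * Lazily fill trace->syscalls.table[id]: grow the table if needed, resolve
 * the syscall name, look up any hand-written formatter, read the
 * syscalls:sys_enter_<name> tracefs format to get the argument fields and
 * set up the per-argument pretty-printers.
 */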
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static int trace__read_syscall_info(struct trace *trace, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) char tp_name[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct syscall *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) const char *name = syscalltbl__name(trace->sctbl, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) #ifdef HAVE_SYSCALL_TABLE_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (trace->syscalls.table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (trace->syscalls.table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
		// When using libaudit we don't know beforehand what the max syscall id is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
		// If brand new, zero all id + 1 entries from offset 0; otherwise zero just the newly added ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (trace->syscalls.table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) memset(table, 0, (id + 1) * sizeof(*sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) trace->syscalls.table = table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) trace->sctbl->syscalls.max_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) sc = trace->syscalls.table + id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (sc->nonexistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) sc->nonexistent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) sc->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) sc->fmt = syscall_fmt__find(sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) sc->tp_format = trace_event__tp_format("syscalls", tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) sc->tp_format = trace_event__tp_format("syscalls", tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (IS_ERR(sc->tp_format))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return PTR_ERR(sc->tp_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) sc->args = sc->tp_format->format.fields;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /*
	 * The first field, '__syscall_nr' ('nr' on older kernels), holds the
	 * syscall number, which we don't need here, so check for it and skip
	 * it when present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) sc->args = sc->args->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) --sc->nr_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) return syscall__set_arg_fmts(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (fmt != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static int intcmp(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) const int *one = a, *another = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) return *one - *another;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
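/*
 * Turn the syscall names and glob patterns in trace->ev_qualifier into the
 * sorted id array in trace->ev_qualifier_ids, skipping (and pr_debug'ing)
 * names the syscall table doesn't know about.
 */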
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static int trace__validate_ev_qualifier(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) bool printed_invalid_prefix = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct str_node *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) trace->ev_qualifier_ids.entries = malloc(nr_allocated *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) sizeof(trace->ev_qualifier_ids.entries[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (trace->ev_qualifier_ids.entries == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) strlist__for_each_entry(pos, trace->ev_qualifier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) const char *sc = pos->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) goto matches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (!printed_invalid_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) pr_debug("Skipping unknown syscalls: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) printed_invalid_prefix = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) pr_debug(", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) pr_debug("%s", sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) matches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) trace->ev_qualifier_ids.entries[nr_used++] = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (match_next == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (nr_allocated == nr_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) void *entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) nr_allocated += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) entries = realloc(trace->ev_qualifier_ids.entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (entries == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) fputs("\nError:\t Not enough memory for parsing\n", trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) trace->ev_qualifier_ids.entries = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) trace->ev_qualifier_ids.entries[nr_used++] = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) trace->ev_qualifier_ids.nr = nr_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (printed_invalid_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) pr_debug("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) zfree(&trace->ev_qualifier_ids.entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) trace->ev_qualifier_ids.nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
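/*
 * Should this syscall id be traced? True when no qualifier list was given,
 * when the id is in the list, or when the list is negated
 * (trace->not_ev_qualifier) and the id is not in it.
 */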
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) bool in_ev_qualifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (trace->ev_qualifier_ids.nr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (in_ev_qualifier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return !trace->not_ev_qualifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return trace->not_ev_qualifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * args is to be interpreted as a series of longs but we need to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * 8-byte unaligned accesses. args points to raw_data within the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * and raw_data is guaranteed to be 8-byte unaligned because it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * preceded by raw_size which is a u32. So we need to copy args to a temp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) unsigned char *p = arg->args + sizeof(unsigned long) * idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) memcpy(&val, p, sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct syscall_arg *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return scnprintf(bf, size, "arg%d: ", arg->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /*
 * Mask whatever needs masking before checking if the value is in fact zero,
 * such as the mount 'flags' argument, where some magic flag needs to be
 * ignored, see the comment in tools/perf/trace/beauty/mount_flags.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (fmt && fmt->mask_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) return fmt->mask_val(arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct syscall_arg *arg, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (fmt && fmt->scnprintf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) arg->val = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (fmt->parm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) arg->parm = fmt->parm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return fmt->scnprintf(bf, size, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return scnprintf(bf, size, "%ld", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
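/*
 * Format the syscall entry arguments into 'bf', one value per argument
 * (honouring show_arg_names/show_zeros), using the tracepoint format fields
 * when available and falling back to raw "argN: value" output when the
 * tracepoint /format file could not be read.
 */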
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) unsigned char *args, void *augmented_args, int augmented_args_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) struct trace *trace, struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) u8 bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct syscall_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) .args = args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) .augmented = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) .size = augmented_args_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) .args = augmented_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) .idx = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) .mask = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) .trace = trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) .thread = thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) .show_string_prefix = trace->show_string_prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) struct thread_trace *ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * Things like fcntl will set this in its 'cmd' formatter to pick the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * right formatter for the return value (an fd? file flags?), which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * not needed for syscalls that always return a given type, say an fd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ttrace->ret_scnprintf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (sc->args != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct tep_format_field *field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) for (field = sc->args; field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) field = field->next, ++arg.idx, bit <<= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (arg.mask & bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) arg.fmt = &sc->arg_fmt[arg.idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) val = syscall_arg__val(&arg, arg.idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) * Some syscall args need some mask, most don't and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * return val untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated with it in a
			 * strarray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (val == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) !trace->show_zeros &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) !(sc->arg_fmt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) (sc->arg_fmt[arg.idx].show_zero ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) sc->arg_fmt[arg.idx].parm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (trace->show_arg_names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) bf + printed, size - printed, &arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) } else if (IS_ERR(sc->tp_format)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * If we managed to read the tracepoint /format file, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * may end up not having any args, like with gettid(), so only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * print the raw args when we didn't manage to read it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) while (arg.idx < sc->nr_args) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (arg.mask & bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) goto next_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) val = syscall_arg__val(&arg, arg.idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (printed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) printed += scnprintf(bf + printed, size - printed, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) next_arg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) ++arg.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) bit <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct perf_sample *sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
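/*
 * Map a syscall id from a sample to its struct syscall descriptor, reading
 * it in on first use; returns NULL for invalid or nonexistent ids,
 * complaining only at higher verbosity levels.
 */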
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static struct syscall *trace__syscall_info(struct trace *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct evsel *evsel, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * before that, leaving at a higher verbosity level till that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * explained. Reproduced with plain ftrace with:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * echo 1 > /t/events/raw_syscalls/sys_exit/enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * grep "NR -1 " /t/trace_pipe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * After generating some load on the machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (verbose > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static u64 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) id, evsel__name(evsel), ++n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) #ifdef HAVE_SYSCALL_TABLE_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (id > trace->sctbl->syscalls.max_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (id >= trace->sctbl->syscalls.max_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /*
		 * With libaudit we don't know beforehand what the max_id is,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * so we let trace__read_syscall_info() figure that out as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * go on reading syscalls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) err = trace__read_syscall_info(trace, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) goto out_cant_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) (err = trace__read_syscall_info(trace, id)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) goto out_cant_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (trace->syscalls.table[id].name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (trace->syscalls.table[id].nonexistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) goto out_cant_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return &trace->syscalls.table[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) out_cant_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (verbose > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) char sbuf[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) fputs(" information\n", trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) struct syscall_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) u64 nr_failures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int max_errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) u32 *errnos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
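/*
 * Update the per-thread, per-syscall summary: one struct syscall_stats per
 * syscall id in ttrace->syscall_stats, accumulating call durations, the
 * number of failures and, when errno_summary is enabled, a per-errno count.
 */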
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) int id, struct perf_sample *sample, long err, bool errno_summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct syscall_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) u64 duration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) inode = intlist__findnew(ttrace->syscall_stats, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (inode == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) stats = inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (stats == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) stats = malloc(sizeof(*stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (stats == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) stats->nr_failures = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) stats->max_errno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) stats->errnos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) init_stats(&stats->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) inode->priv = stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (ttrace->entry_time && sample->time > ttrace->entry_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) duration = sample->time - ttrace->entry_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) update_stats(&stats->stats, duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) ++stats->nr_failures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (!errno_summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) err = -err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (err > stats->max_errno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (new_errnos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) thread__comm_str(thread), thread->pid_, thread->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) stats->errnos = new_errnos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) stats->max_errno = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ++stats->errnos[err - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
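/*
 * Flush a sys_enter that is still pending for trace->current, terminating
 * its line with " ..." so that another event can be printed before the
 * matching sys_exit arrives.
 */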
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static int trace__printf_interrupted_entry(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) size_t printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (trace->failure_only || trace->current == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) ttrace = thread__priv(trace->current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (!ttrace->entry_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (len < trace->args_alignment - 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) printed += fprintf(trace->output, " ...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) ttrace->entry_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) ++trace->nr_events_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct perf_sample *sample, struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) int printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (trace->print_sample) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) double ts = (double)sample->time / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) evsel__name(evsel), ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) thread__comm_str(thread),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) sample->pid, sample->tid, sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) void *augmented_args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and, if so, look not after
	 * sc->args_size but always after the full raw_syscalls:sys_enter
	 * payload, which is fixed.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * copies only what we need for each syscall, like what happens when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * traffic to just what is needed for each syscall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) int args_size = raw_augmented_args_size ?: sc->args_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) *augmented_args_size = sample->raw_size - args_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) if (*augmented_args_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) augmented_args = sample->raw_data + args_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return augmented_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
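/*
 * sys_enter handler: format the syscall name and arguments into
 * ttrace->entry_str and either print the line right away, with a "= ?"
 * return, for syscalls that never return (exit, exit_group), or keep it
 * pending until the matching sys_exit arrives.
 */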
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) void *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) int printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) int augmented_args_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) void *augmented_args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct syscall *sc = trace__syscall_info(trace, evsel, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (sc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ttrace = thread__trace(thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) trace__fprintf_sample(trace, evsel, sample, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) args = perf_evsel__sc_tp_ptr(evsel, args, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (ttrace->entry_str == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) ttrace->entry_str = malloc(trace__entry_str_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (!ttrace->entry_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) trace__printf_interrupted_entry(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /*
	 * If this is raw_syscalls.sys_enter, then it always comes with all 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4.
	 * That breaks the syscall__augmented_args() check for augmented args, as we
	 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs format
	 * file: when handling, say, the openat syscall we end up getting 6 args for the
	 * raw_syscalls:sys_enter event when we expected just 4, and mistakenly take the
	 * extra 2 u64 args for the augmented filename. So just check here and avoid
	 * using augmented syscalls when the evsel is the raw_syscalls one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (evsel != trace->syscalls.events.sys_enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) ttrace->entry_time = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) msg = ttrace->entry_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) args, augmented_args, augmented_args_size, trace, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
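/*
* Syscalls flagged as never returning (e.g. exit, exit_group) get their entry
* printed right away with an unknown ("?") return value, since no matching
* sys_exit will ever arrive to complete the line.
*/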
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (sc->is_exit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) printed = fprintf(trace->output, "%s)", ttrace->entry_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (trace->args_alignment > printed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) alignment = trace->args_alignment - printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) fprintf(trace->output, "%*s= ?\n", alignment, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) ttrace->entry_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) /* See trace__vfs_getname & trace__sys_exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) ttrace->filename.pending_open = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
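/*
* Remember the last thread to enter a syscall so that
* trace__printf_interrupted_entry() can flush its still-pending entry with
* "..." if another event gets printed before the matching sys_exit.
*/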
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (trace->current != thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) thread__put(trace->current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) trace->current = thread__get(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) struct syscall *sc = trace__syscall_info(trace, evsel, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) char msg[1024];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) void *args, *augmented_args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) int augmented_args_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (sc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) ttrace = thread__trace(thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) * and the rest of the beautifiers access it via struct syscall_arg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) args = perf_evsel__sc_tp_ptr(evsel, args, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) fprintf(trace->output, "%s", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
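/*
* Resolve the sample's callchain into the cursor, honouring a per-event
* sample_max_stack if one was requested and falling back to the global
* --max-stack setting otherwise.
*/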
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) struct callchain_cursor *cursor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) struct addr_location al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) int max_stack = evsel->core.attr.sample_max_stack ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) evsel->core.attr.sample_max_stack :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) trace->max_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (machine__resolve(trace->host, &al, sample) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) addr_location__put(&al);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) /* TODO: user-configurable print_opts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) const unsigned int print_opts = EVSEL__PRINT_SYM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) EVSEL__PRINT_DSO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) EVSEL__PRINT_UNKNOWN_AS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) static const char *errno_to_name(struct evsel *evsel, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) struct perf_env *env = evsel__env(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) const char *arch_name = perf_env__arch(env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return arch_syscalls__strerrno(arch_name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) u64 duration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) bool duration_calculated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) int alignment = trace->args_alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) struct syscall *sc = trace__syscall_info(trace, evsel, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (sc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) ttrace = thread__trace(thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) trace__fprintf_sample(trace, evsel, sample, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (trace->summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) trace__set_fd_pathname(thread, ret, ttrace->filename.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) ttrace->filename.pending_open = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) ++trace->stats.vfs_getname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (ttrace->entry_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) duration = sample->time - ttrace->entry_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (trace__filter_duration(trace, duration))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) duration_calculated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) } else if (trace->duration_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) if (sample->callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (callchain_ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (callchain_cursor.nr < trace->min_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) callchain_ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (trace->summary_only || (ret >= 0 && trace->failure_only))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (ttrace->entry_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) printed = fprintf(trace->output, "%s", ttrace->entry_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) printed += fprintf(trace->output, " ... [");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) printed += 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) printed += fprintf(trace->output, "]: %s()", sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) printed++; /* the closing ')' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (alignment > printed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) alignment -= printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) fprintf(trace->output, ")%*s= ", alignment, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
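/*
* Decide how to render the return value: negative values are decoded into an
* errno name plus message, zero may mean a timeout for some syscalls, some
* formats want hex or a child pid (printed with its comm), a beautifier may
* have installed a per-thread ret_scnprintf, and the default is plain signed
* decimal.
*/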
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (sc->fmt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) goto errno_print;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) signed_print:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) fprintf(trace->output, "%ld", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) } else if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) errno_print: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) char bf[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) *e = errno_to_name(evsel, -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) fprintf(trace->output, "-1 %s (%s)", e, emsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) } else if (ret == 0 && sc->fmt->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) fprintf(trace->output, "0 (Timeout)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) else if (ttrace->ret_scnprintf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) char bf[1024];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) struct syscall_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) .val = ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) .thread = thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) .trace = trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) ttrace->ret_scnprintf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) fprintf(trace->output, "%s", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) } else if (sc->fmt->hexret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) fprintf(trace->output, "%#lx", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) else if (sc->fmt->errpid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) struct thread *child = machine__find_thread(trace->host, ret, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (child != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) fprintf(trace->output, "%ld", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (child->comm_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) fprintf(trace->output, " (%s)", thread__comm_str(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) thread__put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) goto signed_print;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) fputc('\n', trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) * For the sake of --max-events, we only count as an 'event' a non-filtered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * sys_enter + sys_exit pair or another tracepoint event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) interrupted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (callchain_ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) trace__fprintf_callchain(trace, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) else if (callchain_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) ttrace->entry_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) size_t filename_len, entry_str_len, to_move;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) ssize_t remaining_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) char *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) const char *filename = evsel__rawptr(evsel, sample, "pathname");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (!thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (!ttrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) filename_len = strlen(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (filename_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (ttrace->filename.namelen < filename_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) char *f = realloc(ttrace->filename.name, filename_len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (f == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) ttrace->filename.namelen = filename_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) ttrace->filename.name = f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) strcpy(ttrace->filename.name, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) ttrace->filename.pending_open = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (!ttrace->filename.ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) entry_str_len = strlen(ttrace->entry_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (remaining_space <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (filename_len > (size_t)remaining_space) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) filename += filename_len - remaining_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) filename_len = remaining_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
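/*
* Splice the resolved pathname into the already formatted entry string, right
* at the position recorded for the filename pointer argument: shift the tail
* (including the terminating NUL) and copy the name into the gap.
*/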
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) memmove(pos + filename_len, pos, to_move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) memcpy(pos, filename, filename_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) ttrace->filename.ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) ttrace->filename.entry_str_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) u64 runtime = evsel__intval(evsel, sample, "runtime");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) double runtime_ms = (double)runtime / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct thread *thread = machine__findnew_thread(trace->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) sample->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) struct thread_trace *ttrace = thread__trace(thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) goto out_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
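/* Accumulate CPU runtime both per thread and for the session-wide summary. */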
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) ttrace->runtime_ms += runtime_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) trace->runtime_ms += runtime_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) out_dump:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) evsel->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) evsel__strval(evsel, sample, "comm"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) (pid_t)evsel__intval(evsel, sample, "pid"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) evsel__intval(evsel, sample, "vruntime"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
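/*
* Printer callback for binary__fprintf(): emit only the character view of the
* raw payload (non-printable bytes as '.'), suppressing the usual hexdump
* decorations such as addresses, separators and padding.
*/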
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) static int bpf_output__printer(enum binary_printer_ops op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) unsigned int val, void *extra __maybe_unused, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) unsigned char ch = (unsigned char)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) case BINARY_PRINT_CHAR_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return fprintf(fp, "%c", isprint(ch) ? ch : '.');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) case BINARY_PRINT_DATA_BEGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) case BINARY_PRINT_LINE_BEGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) case BINARY_PRINT_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) case BINARY_PRINT_NUM_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) case BINARY_PRINT_NUM_PAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) case BINARY_PRINT_SEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) case BINARY_PRINT_CHAR_PAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) case BINARY_PRINT_LINE_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) case BINARY_PRINT_DATA_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static void bpf_output__fprintf(struct trace *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) binary__fprintf(sample->raw_data, sample->raw_size, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) bpf_output__printer, NULL, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) ++trace->nr_events_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct thread *thread, void *augmented_args, int augmented_args_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) char bf[2048];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) size_t size = sizeof(bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) struct tep_format_field *field = evsel->tp_format->format.fields;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) u8 bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct syscall_arg syscall_arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) .augmented = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) .size = augmented_args_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) .args = augmented_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) .idx = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) .mask = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) .trace = trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) .thread = thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) .show_string_prefix = trace->show_string_prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
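/*
* Walk the tracepoint fields in lockstep with the per-argument formatters; a
* formatter may set bits in syscall_arg.mask for arguments it has already
* consumed, and those are skipped here.
*/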
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (syscall_arg.mask & bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) syscall_arg.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) syscall_arg.fmt = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (field->flags & TEP_FIELD_IS_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) int offset = field->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
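/*
* Dynamic array fields (__data_loc) encode the payload length in the upper 16
* bits of the field value and the data offset in the lower 16 bits.
*/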
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (field->flags & TEP_FIELD_IS_DYNAMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) offset = format_field__intval(field, sample, evsel->needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) syscall_arg.len = offset >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) offset &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) val = (uintptr_t)(sample->raw_data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) val = format_field__intval(field, sample, evsel->needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) * Some syscall args need some mask, most don't and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) * return val untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) * Suppress this argument if its value is zero and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) * we don't have a string associated in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * strarray for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (val == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) !trace->show_zeros &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) !((arg->show_zero ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) arg->scnprintf == SCA_STRARRAY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) arg->scnprintf == SCA_STRARRAYS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) arg->parm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) * XXX Perhaps we should have a show_tp_arg_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) * leaving show_arg_names just for syscalls?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (1 || trace->show_arg_names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return printed + fprintf(trace->output, "%s", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) static int trace__event_handler(struct trace *trace, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) int callchain_ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * Check if we called perf_evsel__disable(evsel) due to, for instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * this event's max_events having been hit and this is an entry coming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * from the ring buffer that we should discard, since the max events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * have already been considered/printed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (evsel->disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (sample->callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (callchain_ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (callchain_cursor.nr < trace->min_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) callchain_ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) trace__printf_interrupted_entry(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) trace__fprintf_tstamp(trace, sample->time, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (trace->trace_syscalls && trace->show_duration)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) fprintf(trace->output, "( ): ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) if (thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) trace__fprintf_comm_tid(trace, thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (evsel == trace->syscalls.events.augmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int id = perf_evsel__sc_tp_uint(evsel, id, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) struct syscall *sc = trace__syscall_info(trace, evsel, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) if (sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) fprintf(trace->output, "%s(", sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) trace__fprintf_sys_enter(trace, evsel, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) fputc(')', trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) goto newline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * XXX: Not having the associated syscall info or not finding/adding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * the thread should never happen, but if it does...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * fall thru and print it as a bpf_output event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) fprintf(trace->output, "%s(", evsel->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (evsel__is_bpf_output(evsel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) bpf_output__fprintf(trace, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) } else if (evsel->tp_format) {
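/*
* syscalls:sys_enter_NAME tracepoints get the syscall argument beautifiers
* first; anything else, or a failure to format it that way, falls back to
* generic tracepoint field printing below.
*/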
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) trace__fprintf_sys_enter(trace, evsel, sample)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) if (trace->libtraceevent_print) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) event_format__fprintf(evsel->tp_format, sample->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) sample->raw_data, sample->raw_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) newline:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) fprintf(trace->output, ")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (callchain_ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) trace__fprintf_callchain(trace, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) else if (callchain_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ++trace->nr_events_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) evsel__disable(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) evsel__close(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) static void print_location(FILE *f, struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) struct addr_location *al,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) bool print_dso, bool print_sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if ((verbose > 0 || print_dso) && al->map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) fprintf(f, "%s@", al->map->dso->long_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if ((verbose > 0 || print_sym) && al->sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) fprintf(f, "%s+0x%" PRIx64, al->sym->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) al->addr - al->sym->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) else if (al->map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) fprintf(f, "0x%" PRIx64, al->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) fprintf(f, "0x%" PRIx64, sample->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) static int trace__pgfault(struct trace *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) struct addr_location al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) char map_type = 'd';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) struct thread_trace *ttrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) int callchain_ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (sample->callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (callchain_ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (callchain_cursor.nr < trace->min_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) callchain_ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ttrace = thread__trace(thread, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) ttrace->pfmaj++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) ttrace->pfmin++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (trace->summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) fprintf(trace->output, "%sfault [",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) "maj" : "min");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) print_location(trace->output, sample, &al, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) fprintf(trace->output, "] => ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
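/*
* Now resolve the data address being accessed (sample->addr); map_type stays
* 'd' when it lands in a known mapping and is adjusted below otherwise.
*/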
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) if (!al.map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (al.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) map_type = 'x';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) map_type = '?';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) print_location(trace->output, sample, &al, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) fprintf(trace->output, " (%c%c)\n", map_type, al.level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (callchain_ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) trace__fprintf_callchain(trace, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) else if (callchain_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) ++trace->nr_events_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) static void trace__set_base_time(struct trace *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) * and don't use sample->time unconditionally: we may end up having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) * some other event in the future without PERF_SAMPLE_TIME for a good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) * reason, i.e. we may not be interested in its timestamps, just in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) * the fact that it took place, picking up some piece of information when it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * appears in our event stream (vfs_getname comes to mind).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (trace->base_time == 0 && !trace->full_time &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) trace->base_time = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) static int trace__process_sample(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) struct trace *trace = container_of(tool, struct trace, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) tracepoint_handler handler = evsel->handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (thread && thread__is_filtered(thread))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) trace__set_base_time(trace, evsel, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) ++trace->nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) handler(trace, evsel, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) static int trace__record(struct trace *trace, int argc, const char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) unsigned int rec_argc, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) const char **rec_argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) const char * const record_args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) "record",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) "-R",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) "-m", "1024",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) "-c", "1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) pid_t pid = getpid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) char *filter = asprintf__tp_filter_pids(1, &pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) const char * const sc_args[] = { "-e", };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) const char * const majpf_args[] = { "-e", "major-faults" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) const char * const minpf_args[] = { "-e", "minor-faults" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) /* +3 is for the event string below and the pid filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) majpf_args_nr + minpf_args_nr + argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) rec_argv = calloc(rec_argc + 1, sizeof(char *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (rec_argv == NULL || filter == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) for (i = 0; i < ARRAY_SIZE(record_args); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) rec_argv[j++] = record_args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (trace->trace_syscalls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) for (i = 0; i < sc_args_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) rec_argv[j++] = sc_args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) /* event string may be different for older kernels - e.g., RHEL6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (is_valid_tracepoint("raw_syscalls:sys_enter"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) else if (is_valid_tracepoint("syscalls:sys_enter"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) pr_err("Neither raw_syscalls nor syscalls events exist.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
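/*
* Filter out our own pid so that perf record does not trace the tracer and
* feed its own syscalls back into the recording.
*/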
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) rec_argv[j++] = "--filter";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) rec_argv[j++] = filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (trace->trace_pgfaults & TRACE_PFMAJ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) for (i = 0; i < majpf_args_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) rec_argv[j++] = majpf_args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (trace->trace_pgfaults & TRACE_PFMIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) for (i = 0; i < minpf_args_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) rec_argv[j++] = minpf_args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) for (i = 0; i < (unsigned int)argc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) rec_argv[j++] = argv[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) err = cmd_record(j, rec_argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) free(filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) free(rec_argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
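/*
* For illustration only: with syscall tracing enabled, the argv assembled
* above amounts to roughly
*
*   perf record -R -m 1024 -c 1 \
*        -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
*        --filter <exclude-own-pid> [-e major-faults] [-e minor-faults] \
*        <workload args...>
*
* before being handed to cmd_record().
*/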
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) static bool evlist__add_vfs_getname(struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) struct evsel *evsel, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) struct parse_events_error err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
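/*
* Try to use the probe:vfs_getname* probes (typically set up beforehand with
* 'perf probe') and keep only the variants exposing a "pathname" field; any
* other variant is dropped from the evlist.
*/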
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) bzero(&err, sizeof(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) ret = parse_events(evlist, "probe:vfs_getname*", &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) free(err.str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) free(err.help);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) free(err.first_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) free(err.first_help);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) evlist__for_each_entry_safe(evlist, evsel, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (evsel__field(evsel, "pathname")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) evsel->handler = trace__vfs_getname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) list_del_init(&evsel->core.node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) evsel->evlist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) evsel__delete(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) static struct evsel *evsel__new_pgfault(u64 config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct perf_event_attr attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) .type = PERF_TYPE_SOFTWARE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) .mmap_data = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
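/*
* Count every fault (period 1) and ask for data mmap records so the faulting
* address can later be resolved to a map/symbol.
*/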
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) attr.config = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) attr.sample_period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) event_attr_init(&attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) evsel = evsel__new(&attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) if (evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) evsel->handler = trace__pgfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) return evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
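/*
 * Dispatch one event from the ring buffer: non-sample events go straight to
 * trace__process_event(), samples are matched to their evsel via the sample
 * id and routed to that evsel's handler, unless the evswitch discards them.
 * Tracepoint samples without a raw payload are reported and skipped, and once
 * --max-events samples have been printed the main loop is told to stop.
 */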
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) const u32 type = event->header.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (type != PERF_RECORD_SAMPLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) trace__process_event(trace, trace->host, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) if (evsel == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) if (evswitch__discard(&trace->evswitch, evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) trace__set_base_time(trace, evsel, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) sample->raw_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) evsel__name(evsel), sample->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) sample->cpu, sample->raw_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) tracepoint_handler handler = evsel->handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) handler(trace, evsel, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) interrupted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
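/*
 * Create the raw_syscalls:sys_enter/sys_exit tracepoint events, set their
 * handlers and the accessors for the "args"/"ret" payload fields, configure
 * callchains and add both to the evlist.  Unless kernel syscall callchains
 * were requested, the sys_exit callchain is restricted to user space.
 */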
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) static int trace__add_syscall_newtp(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) struct evlist *evlist = trace->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) struct evsel *sys_enter, *sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (sys_enter == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) goto out_delete_sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (sys_exit == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) goto out_delete_sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) goto out_delete_sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) evlist__add(evlist, sys_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) evlist__add(evlist, sys_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (callchain_param.enabled && !trace->kernel_syscallchains) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) * We're interested only in the user space callchain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		 * leading to the syscall; allow overriding that for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		 * debugging reasons using --kernel_syscall_callchains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) sys_exit->core.attr.exclude_callchain_kernel = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) trace->syscalls.events.sys_enter = sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) trace->syscalls.events.sys_exit = sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) out_delete_sys_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) evsel__delete_priv(sys_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) out_delete_sys_enter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) evsel__delete_priv(sys_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
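/*
 * Turn the list of syscall ids in the event qualifier into an "id in (...)"
 * style tracepoint filter (negated if the qualifier was negated) and append
 * it to both the sys_enter and sys_exit tracepoints.
 */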
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) struct evsel *sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) trace->ev_qualifier_ids.nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) trace->ev_qualifier_ids.entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (filter == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) goto out_enomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) sys_exit = trace->syscalls.events.sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) err = evsel__append_tp_filter(sys_exit, filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) free(filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) out_enomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) errno = ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) #ifdef HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (trace->bpf_obj == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) return bpf_object__find_map_by_name(trace->bpf_obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) static void trace__set_bpf_map_filtered_pids(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) static void trace__set_bpf_map_syscalls(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if (trace->bpf_obj == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) return bpf_object__find_program_by_title(trace->bpf_obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
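/*
 * Pick the BPF augmenter for one side (enter/exit) of a syscall: use the
 * program named in the syscall's fmt entry if there is one, otherwise look
 * for one following the "!syscalls:sys_<type>_<name>" (or alias) naming
 * convention, and fall back to the generic unaugmented program.
 */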
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) const char *prog_name, const char *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) if (prog_name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) char default_prog_name[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) prog = trace__find_bpf_program_by_title(trace, default_prog_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) if (prog != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) goto out_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) if (sc->fmt && sc->fmt->alias) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) prog = trace__find_bpf_program_by_title(trace, default_prog_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (prog != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) goto out_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) goto out_unaugmented;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) prog = trace__find_bpf_program_by_title(trace, prog_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (prog != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) out_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) prog_name, type, sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) out_unaugmented:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) return trace->syscalls.unaugmented_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) struct syscall *sc = trace__syscall_info(trace, NULL, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (sc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) struct syscall *sc = trace__syscall_info(trace, NULL, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct syscall *sc = trace__syscall_info(trace, NULL, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
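/*
 * Fill in the syscall map entry's string_args_len[]: PATH_MAX for filename
 * arguments, so the BPF augmenter copies the string, and zero for all other
 * slots, including the ones past this syscall's argument count.
 */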
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) struct syscall *sc = trace__syscall_info(trace, NULL, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) int arg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (sc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) for (; arg < sc->nr_args; ++arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) entry->string_args_len[arg] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) /* Should be set like strace -s strsize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) entry->string_args_len[arg] = PATH_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) for (; arg < 6; ++arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) entry->string_args_len[arg] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) }
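
/*
 * Propagate the event qualifier to the "syscalls" BPF map: each syscall in
 * the qualifier list is marked enabled (or disabled, for a negated
 * qualifier), and when enabling we also set up its string args lengths and
 * its augmenter programs.
 */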
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) int fd = bpf_map__fd(trace->syscalls.map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) struct bpf_map_syscall_entry value = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) .enabled = !trace->not_ev_qualifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) int key = trace->ev_qualifier_ids.entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (value.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) trace__init_bpf_map_syscall_args(trace, key, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) trace__init_syscall_bpf_progs(trace, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) int fd = bpf_map__fd(trace->syscalls.map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) struct bpf_map_syscall_entry value = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) .enabled = enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) int err = 0, key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) trace__init_bpf_map_syscall_args(trace, key, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) static int trace__init_syscalls_bpf_map(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) bool enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (trace->ev_qualifier_ids.nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) enabled = trace->not_ev_qualifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) return __trace__init_syscalls_bpf_map(trace, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
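/*
 * Try to reuse another syscall's sys_enter augmenter: a candidate is usable
 * when its pointer arguments line up with this syscall's, in position and
 * type, and it doesn't copy any extra pointer beyond those the two syscalls
 * have in common.
 */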
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) struct tep_format_field *field, *candidate_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) * We're only interested in syscalls that have a pointer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) for (field = sc->args; field; field = field->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (field->flags & TEP_FIELD_IS_POINTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) goto try_to_find_pair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) try_to_find_pair:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) struct syscall *pair = trace__syscall_info(trace, NULL, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) struct bpf_program *pair_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) bool is_candidate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) if (pair == NULL || pair == sc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) for (field = sc->args, candidate_field = pair->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (is_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (!candidate_is_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 					// The candidate just doesn't copy our pointer arg, but it might copy other pointers we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (candidate_is_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) // The candidate might copy a pointer we don't have, skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) goto next_candidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) if (strcmp(field->type, candidate_field->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) goto next_candidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) is_candidate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (!is_candidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) goto next_candidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		 * Check if the tentative pair syscall augmenter has more pointers; if it does,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		 * then it may be collecting those as well and we can't use it, as it would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		 * collect more than what is common to the two syscalls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) if (candidate_field) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) if (candidate_field->flags & TEP_FIELD_IS_POINTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) goto next_candidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) pair_prog = pair->bpf_prog.sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		 * have been searched for yet, so search for it here; if that returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		 * the unaugmented one, then ignore this candidate, otherwise we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 		 * reuse that BPF program for a filtered syscall on a non-filtered one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) * For instance, we have "!syscalls:sys_enter_renameat" and that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) * useful for "renameat2".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) if (pair_prog == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) if (pair_prog == trace->syscalls.unaugmented_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) goto next_candidate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) return pair_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) next_candidate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)
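/*
 * Populate the sys_enter/sys_exit BPF prog array maps used by the
 * raw_syscalls tail calls: first with each enabled syscall's own augmenter
 * (or the unaugmented fallback), then with a second pass, described below,
 * that reuses a compatible augmenter for syscalls left unaugmented.
 */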
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) int err = 0, key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) int prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) if (!trace__syscall_enabled(trace, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) trace__init_syscall_bpf_progs(trace, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		// It'll get at least the "!raw_syscalls:unaugmented" program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	 * Now let's do a second pass looking for enabled syscalls without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) * an augmenter that have a signature that is a superset of another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) * syscall with an augmenter so that we can auto-reuse it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) * I.e. if we have an augmenter for the "open" syscall that has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) * this signature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) * int open(const char *pathname, int flags, mode_t mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	 * i.e. one that will collect just the first string argument, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	 * can reuse it for the 'creat' syscall, which has this signature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) * int creat(const char *pathname, mode_t mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) * and for:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * int stat(const char *pathname, struct stat *statbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) * int lstat(const char *pathname, struct stat *statbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	 * That works because the 'open' augmenter will collect the first arg as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	 * a string and leave all the other args alone, which already helps with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	 * beautifying 'stat' and 'lstat''s pathname arg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) * Then, in time, when 'stat' gets an augmenter that collects both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	 * first and second arg (this one on the raw_syscalls:sys_exit prog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	 * array tail call), then that one will be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) struct syscall *sc = trace__syscall_info(trace, NULL, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) struct bpf_program *pair_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) int prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) * For now we're just reusing the sys_enter prog, and if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) * already has an augmenter, we don't need to find one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) * Look at all the other syscalls for one that has a signature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) * that is close enough that we can share:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (pair_prog == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) sc->bpf_prog.sys_enter = pair_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) * with the fd for the program we're reusing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
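/*
 * Undo the augmented syscalls setup: remove the augmented event and every
 * other evsel coming from the trace BPF object from the evlist, delete them,
 * and close the BPF object itself.
 */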
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) static void trace__delete_augmented_syscalls(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) struct evsel *evsel, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) evlist__remove(trace->evlist, trace->syscalls.events.augmented);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) evsel__delete(trace->syscalls.events.augmented);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) trace->syscalls.events.augmented = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (evsel->bpf_obj == trace->bpf_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) evlist__remove(trace->evlist, evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) evsel__delete(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) bpf_object__close(trace->bpf_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) trace->bpf_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) #else // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) const char *name __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) const char *name __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) #endif // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
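/*
 * Return true when every evsel in the list belongs to the augmented syscalls
 * machinery, i.e. it is either the augmented event itself or comes from the
 * same BPF object.
 */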
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) evlist__for_each_entry(trace->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) if (evsel == trace->syscalls.events.augmented ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) evsel->bpf_obj == trace->bpf_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) static int trace__set_ev_qualifier_filter(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) if (trace->syscalls.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) return trace__set_ev_qualifier_bpf_filter(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) if (trace->syscalls.events.sys_enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) return trace__set_ev_qualifier_tp_filter(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) size_t npids __maybe_unused, pid_t *pids __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) #ifdef HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) bool value = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) int map_fd = bpf_map__fd(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) for (i = 0; i < npids; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
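/*
 * Filter out 'perf trace' itself and, walking up the parent chain, the sshd
 * or gnome-terminal ancestor displaying its output, so that printing events
 * doesn't itself generate an endless stream of new events to print.
 */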
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) static int trace__set_filter_loop_pids(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) unsigned int nr = 1, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) pid_t pids[32] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) getpid(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) while (thread && nr < ARRAY_SIZE(pids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) if (parent == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) if (!strcmp(thread__comm_str(parent), "sshd") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) strstarts(thread__comm_str(parent), "gnome-terminal")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) pids[nr++] = parent->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) thread = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (!err && trace->filter_pids.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) static int trace__set_filter_pids(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) * Better not use !target__has_task() here because we need to cover the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * case where no threads were specified in the command line, but a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) * workload was, and in that case we will fill in the thread_map when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * we fork the workload in perf_evlist__prepare_workload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) if (trace->filter_pids.nr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) trace->filter_pids.entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (!err && trace->filter_pids.map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) trace->filter_pids.entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) err = trace__set_filter_loop_pids(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) static int __trace__deliver_event(struct trace *trace, union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) struct evlist *evlist = trace->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) struct perf_sample sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) err = perf_evlist__parse_sample(evlist, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) trace__handle_event(trace, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
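/*
 * When sorting events, flush everything queued more than a second before the
 * last timestamp seen, so output keeps flowing without waiting for the
 * session to end.
 */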
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) static int __trace__flush_events(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) u64 first = ordered_events__first_time(&trace->oe.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) u64 flush = trace->oe.last - NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	/* Is there something to flush? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (first && first < flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) return ordered_events__flush_time(&trace->oe.data, flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) static int trace__flush_events(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) return !trace->sort_events ? 0 : __trace__flush_events(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) static int trace__deliver_event(struct trace *trace, union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) if (!trace->sort_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) return __trace__deliver_event(trace, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) if (err && err != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) return trace__flush_events(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) static int ordered_events__deliver_event(struct ordered_events *oe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) struct ordered_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) struct trace *trace = container_of(oe, struct trace, oe.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) return __trace__deliver_event(trace, event->event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) struct tep_format_field *field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (evsel->tp_format == NULL || fmt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) if (strcmp(field->name, arg) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) return fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
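/*
 * Rewrite the right hand side of comparisons in an evsel filter, turning
 * symbolic values (e.g. flag or enum names) into the numbers the kernel
 * filter expects, using the strtoul resolver of the argument named on the
 * left hand side.
 */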
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) char *tok, *left = evsel->filter, *new_filter = evsel->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) while ((tok = strpbrk(left, "=<>!")) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) char *right = tok + 1, *right_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) if (*right == '=')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) ++right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) while (isspace(*right))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) ++right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) if (*right == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) while (!isalpha(*left))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (++left == tok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 				 * Bail out: we can't find the name of the argument that is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 				 * used in the filter, so let it try to set this filter; it will fail later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) right_end = right + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) ++right_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) if (isalpha(*right)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) struct syscall_arg_fmt *fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) int left_size = tok - left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) right_size = right_end - right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) char arg[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) while (isspace(left[left_size - 1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) --left_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) if (fmt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) arg, evsel->name, evsel->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) arg, (int)(right - tok), tok, right_size, right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (fmt->strtoul) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) struct syscall_arg syscall_arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) .parm = fmt->parm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) char *n, expansion[19];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) int expansion_offset = right - new_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) pr_debug("%s", expansion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) pr_debug(" out of memory!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) free(new_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) if (new_filter != evsel->filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) free(new_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 					left = n + expansion_offset + expansion_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) new_filter = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) right_size, right, arg, evsel->name, evsel->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) arg, evsel->name, evsel->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) pr_debug("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) left = right_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) if (new_filter != evsel->filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) evsel__set_filter(evsel, new_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) free(new_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) struct evlist *evlist = trace->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (evsel->filter == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) if (trace__expand_filter(trace, evsel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) *err_evsel = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
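/*
 * Live tracing mode: add the requested events, create the maps, optionally
 * fork the workload, then consume the ring buffers until done.
 */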
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) static int trace__run(struct trace *trace, int argc, const char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) struct evlist *evlist = trace->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) int err = -1, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) unsigned long before;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) const bool forks = argc > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) bool draining = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) trace->live = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) if (!trace->raw_augmented_syscalls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) goto out_error_raw_syscalls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (trace->trace_syscalls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) trace->vfs_getname = evlist__add_vfs_getname(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (pgfault_maj == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) goto out_error_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) evlist__add(evlist, pgfault_maj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if ((trace->trace_pgfaults & TRACE_PFMIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) if (pgfault_min == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) goto out_error_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) evlist__add(evlist, pgfault_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) if (trace->sched &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) goto out_error_sched_stat_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) * If a global cgroup was set, apply it to all the events without an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) * explicit cgroup. I.e.:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) * trace -G A -e sched:*switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) * _and_ sched:sched_switch to the 'A' cgroup, while:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) * trace -e sched:*switch -G A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) * will only set the sched:sched_switch event to the 'A' cgroup, while all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) * the other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) * a cgroup (i.e. on the root cgroup, system wide).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) * Multiple cgroups:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) * trace -G A -e sched:*switch -G B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) * to the 'B' cgroup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) * evlist__set_default_cgroup() grabs a reference of the passed cgroup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (trace->cgroup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) evlist__set_default_cgroup(trace->evlist, trace->cgroup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) err = perf_evlist__create_maps(evlist, &trace->opts.target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) err = trace__symbols_init(trace, evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) fprintf(trace->output, "Problems initializing symbol libraries!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) perf_evlist__config(evlist, &trace->opts, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) signal(SIGCHLD, sig_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) signal(SIGINT, sig_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) if (forks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) argv, false, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) fprintf(trace->output, "Couldn't run the workload!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) err = evlist__open(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) goto out_error_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) err = bpf__apply_obj_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) char errbuf[BUFSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) pr_err("ERROR: Apply config to BPF failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) errbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) goto out_error_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) err = trace__set_filter_pids(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) goto out_error_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) if (trace->syscalls.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) trace__init_syscalls_bpf_map(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (trace->syscalls.prog_array.sys_enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) trace__init_syscalls_bpf_prog_array_maps(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) if (trace->ev_qualifier_ids.nr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) err = trace__set_ev_qualifier_filter(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) goto out_errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) if (trace->syscalls.events.sys_exit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) pr_debug("event qualifier tracepoint filter: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) trace->syscalls.events.sys_exit->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) * If the "close" syscall is not traced, then we will not have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) * opportunity, in syscall_arg__scnprintf_close_fd(), to invalidate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) * fd->pathname table and would end up showing the last value set by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) * syscalls opening a pathname and associating it with a descriptor, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) * read from /proc/pid/fd/, in cases where that doesn't make sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) * not in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) err = trace__expand_filters(trace, &evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) err = perf_evlist__apply_filters(evlist, &evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) goto out_error_apply_filters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) if (trace->dump.map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) bpf_map__fprintf(trace->dump.map, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) err = evlist__mmap(evlist, trace->opts.mmap_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) goto out_error_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) evlist__enable(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) if (forks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) perf_evlist__start_workload(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) if (trace->opts.initial_delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) usleep(trace->opts.initial_delay * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) evlist__enable(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) evlist->core.threads->nr > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) evlist__first(evlist)->core.attr.inherit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) * Now that we have already used evsel->core.attr to ask the kernel to set up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) * events, let's reuse evsel->core.attr.sample_max_stack as the limit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) * trace__resolve_callchain(), allowing per-event max-stack settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) * to override an explicitly set --max-stack global setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) if (evsel__has_callchain(evsel) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) evsel->core.attr.sample_max_stack == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) evsel->core.attr.sample_max_stack = trace->max_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
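/*
 * Main event loop: read every available event from each ring buffer,
 * deliver it, and keep polling until interrupted or fully drained.
 */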
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) before = trace->nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) for (i = 0; i < evlist->core.nr_mmaps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) struct mmap *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) md = &evlist->mmap[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) if (perf_mmap__read_init(&md->core) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) while ((event = perf_mmap__read_event(&md->core)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) ++trace->nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) err = trace__deliver_event(trace, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) perf_mmap__consume(&md->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) if (interrupted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (done && !draining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) evlist__disable(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) draining = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) perf_mmap__read_done(&md->core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) if (trace->nr_events == before) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) int timeout = done ? 100 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) if (!draining && evlist__poll(evlist, timeout) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) draining = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (trace__flush_events(trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) thread__zput(trace->current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) evlist__disable(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) if (trace->sort_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) if (trace->summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) trace__fprintf_thread_summary(trace, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) if (trace->show_tool_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) fprintf(trace->output, "Stats:\n "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) " vfs_getname : %" PRIu64 "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) " proc_getname: %" PRIu64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) trace->stats.vfs_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) trace->stats.proc_getname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) out_delete_evlist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) trace__symbols__exit(trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) evlist__delete(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) cgroup__put(trace->cgroup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) trace->evlist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) trace->live = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) char errbuf[BUFSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) out_error_sched_stat_runtime:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) out_error_raw_syscalls:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) out_error_mmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) out_error_open:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) fprintf(trace->output, "%s\n", errbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) out_error_apply_filters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) fprintf(trace->output,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) "Failed to set filter \"%s\" on event %s with %d (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) evsel->filter, evsel__name(evsel), errno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) str_error_r(errno, errbuf, sizeof(errbuf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) out_error_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) fprintf(trace->output, "Not enough memory to run!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) out_errno:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
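/*
 * Replay mode: instead of live tracing, process the events in a previously
 * recorded perf data file, using the trace->tool callbacks set up below.
 */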
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) static int trace__replay(struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) const struct evsel_str_handler handlers[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) { "probe:vfs_getname", trace__vfs_getname, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) struct perf_data data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) .path = input_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) .mode = PERF_DATA_MODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) .force = trace->force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) trace->tool.sample = trace__process_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) trace->tool.mmap = perf_event__process_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) trace->tool.mmap2 = perf_event__process_mmap2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) trace->tool.comm = perf_event__process_comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) trace->tool.exit = perf_event__process_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) trace->tool.fork = perf_event__process_fork;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) trace->tool.attr = perf_event__process_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) trace->tool.tracing_data = perf_event__process_tracing_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) trace->tool.build_id = perf_event__process_build_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) trace->tool.namespaces = perf_event__process_namespaces;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) trace->tool.ordered_events = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) trace->tool.ordering_requires_timestamps = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) /* add tid to output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) trace->multiple_threads = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) session = perf_session__new(&data, false, &trace->tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (IS_ERR(session))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) return PTR_ERR(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) if (trace->opts.target.pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) if (trace->opts.target.tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) if (symbol__init(&session->header.env) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) trace->host = &session->machines.host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) err = perf_session__set_tracepoints_handlers(session, handlers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) "raw_syscalls:sys_enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) /* older kernels have syscalls tp versus raw_syscalls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) if (evsel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) "syscalls:sys_enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) if (evsel &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) pr_err("Error during initialization of the raw_syscalls:sys_enter event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) "raw_syscalls:sys_exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) if (evsel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) "syscalls:sys_exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) if (evsel &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) pr_err("Error during initialization of the raw_syscalls:sys_exit event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) evsel->handler = trace__pgfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) setup_pager();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) err = perf_session__process_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) pr_err("Failed to process events, error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) else if (trace->summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) trace__fprintf_thread_summary(trace, trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) perf_session__delete(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) static size_t trace__fprintf_threads_header(FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) size_t printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) printed = fprintf(fp, "\n Summary of events:\n\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
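/* Resort the per-thread syscall stats rbtree by total time spent (msecs). */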
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) struct syscall_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) double msecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) int syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) struct int_node *source = rb_entry(nd, struct int_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) struct syscall_stats *stats = source->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) entry->syscall = source->i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) entry->stats = stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) static size_t thread__dump_stats(struct thread_trace *ttrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) struct trace *trace, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) struct syscall *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) struct rb_node *nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) if (syscall_stats == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) printed += fprintf(fp, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) resort_rb__for_each_entry(nd, syscall_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) struct syscall_stats *stats = syscall_stats_entry->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) double avg = avg_stats(&stats->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) double pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) u64 n = (u64)stats->stats.n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) avg /= NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) sc = &trace->syscalls.table[syscall_stats_entry->syscall];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) printed += fprintf(fp, " %-15s", sc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (trace->errno_summary && stats->nr_failures) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) const char *arch_name = perf_env__arch(trace->host->env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) int e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) for (e = 0; e < stats->max_errno; ++e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) if (stats->errnos[e] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) resort_rb__delete(syscall_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) printed += fprintf(fp, "\n\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
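/*
 * One summary block per thread: comm, tid, event count and share, faults,
 * then the per-syscall stats table from thread__dump_stats().
 */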
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) size_t printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) struct thread_trace *ttrace = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) double ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) if (ttrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) printed += fprintf(fp, "%.1f%%", ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) if (ttrace->pfmaj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) if (ttrace->pfmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (trace->sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) else if (fputc('\n', fp) != EOF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) ++printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) printed += thread__dump_stats(ttrace, trace, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) static unsigned long thread__nr_events(struct thread_trace *ttrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) return ttrace ? ttrace->nr_events : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) entry->thread = rb_entry(nd, struct thread, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) size_t printed = trace__fprintf_threads_header(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) struct rb_node *nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) for (i = 0; i < THREADS__TABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (threads == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) fprintf(fp, "%s", "Error sorting output by nr_events!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) resort_rb__for_each_entry(nd, threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) resort_rb__delete(threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) return printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) static int trace__set_duration(const struct option *opt, const char *str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) int unset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) struct trace *trace = opt->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) trace->duration_filter = atof(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
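/*
 * Parse the comma separated list of pids to filter out, prepending our own
 * pid so that 'perf trace' doesn't end up tracing itself.
 */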
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) int unset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) struct trace *trace = opt->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) * FIXME: introduce an intarray class, simply parse the csv and create a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) * { int nr, int entries[] } struct...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) struct intlist *list = intlist__new(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (list == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) trace->filter_pids.entries = calloc(i, sizeof(pid_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) if (trace->filter_pids.entries == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) trace->filter_pids.entries[0] = getpid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) for (i = 1; i < trace->filter_pids.nr; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) intlist__delete(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)
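/*
 * Open the requested output file, first rotating any existing non-empty file
 * to "<filename>.old".
 */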
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) static int trace__open_output(struct trace *trace, const char *filename)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) struct stat st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) if (!stat(filename, &st) && st.st_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) char oldname[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) scnprintf(oldname, sizeof(oldname), "%s.old", filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) unlink(oldname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) rename(filename, oldname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) trace->output = fopen(filename, "w");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) return trace->output == NULL ? -errno : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) static int parse_pagefaults(const struct option *opt, const char *str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) int unset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) int *trace_pgfaults = opt->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (strcmp(str, "all") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) else if (strcmp(str, "maj") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) *trace_pgfaults |= TRACE_PFMAJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) else if (strcmp(str, "min") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) *trace_pgfaults |= TRACE_PFMIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
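/* Set @handler on all evsels that don't already have one. */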
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (evsel->handler == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) evsel->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) if (fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) struct syscall_fmt *scfmt = syscall_fmt__find(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) if (scfmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) int skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) ++skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
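/*
 * For tracepoints added via --event: syscalls:sys_{enter,exit}_* evsels get
 * the syscall tp fields (id + args/ret) set up, everything else gets the
 * generic tracepoint argument beautifiers.
 */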
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) static int evlist__set_syscall_tp_fields(struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) if (evsel->priv || !evsel->tp_format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) if (strcmp(evsel->tp_format->system, "syscalls")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) evsel__init_tp_arg_scnprintf(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) if (evsel__init_syscall_tp(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) struct syscall_tp *sc = __evsel__syscall_tp(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) struct syscall_tp *sc = __evsel__syscall_tp(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) * XXX: Hackish, just splitting the combined -e+--event list (syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc)) to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) * existing facilities unchanged (trace->ev_qualifier + parse_options()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) * It'd be better to introduce a parse_options() variant that would return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) * list with the terms it didn't match to an event...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) static int trace__parse_events_option(const struct option *opt, const char *str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) int unset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) struct trace *trace = (struct trace *)opt->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) const char *s = str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) char *sep = NULL, *lists[2] = { NULL, NULL, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) int len = strlen(str) + 1, err = -1, list, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) char group_name[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) struct syscall_fmt *fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) if (strace_groups_dir == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) if (*s == '!') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) ++s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) trace->not_ev_qualifier = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) if ((sep = strchr(s, ',')) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) *sep = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) list = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (syscalltbl__id(trace->sctbl, s) >= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) goto do_concat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) fmt = syscall_fmt__find_by_alias(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) if (fmt != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) s = fmt->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) path__join(group_name, sizeof(group_name), strace_groups_dir, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) if (access(group_name, R_OK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) do_concat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) if (lists[list]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) sprintf(lists[list] + strlen(lists[list]), ",%s", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) lists[list] = malloc(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) if (lists[list] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) strcpy(lists[list], s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) if (!sep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) *sep = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) s = sep + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) if (lists[1] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) struct strlist_config slist_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) .dirname = strace_groups_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) trace->ev_qualifier = strlist__new(lists[1], &slist_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) if (trace->ev_qualifier == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) fputs("Not enough memory to parse event qualifier\n", trace->output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) if (trace__validate_ev_qualifier(trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) trace->trace_syscalls = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) if (lists[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) struct option o = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) .value = &trace->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) err = parse_events_option(&o, lists[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) if (sep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) *sep = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653)
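/*
 * -G: if events were already parsed, attach the cgroup just to those;
 * otherwise remember it as the default cgroup for the events added later.
 */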
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) struct trace *trace = opt->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) if (!list_empty(&trace->evlist->core.entries)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) struct option o = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) .value = &trace->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) return parse_cgroups(&o, str, unset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
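/*
 * Handler for the 'trace.*' section of .perfconfig; each knob below maps onto
 * a field of 'struct trace' or its embedded record opts. A purely
 * illustrative snippet (not taken from any shipped config):
 *
 *   [trace]
 *       add_events = syscalls:sys_enter_openat
 *       show_zeros = yes
 *       args_alignment = 40
 *       tracepoint_beautifiers = libbeauty
 */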
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) static int trace__config(const char *var, const char *value, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) struct trace *trace = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) if (!strcmp(var, "trace.add_events")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) trace->perfconfig_events = strdup(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) if (trace->perfconfig_events == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) pr_err("Not enough memory for %s\n", "trace.add_events");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) } else if (!strcmp(var, "trace.show_timestamp")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) trace->show_tstamp = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) } else if (!strcmp(var, "trace.show_duration")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) trace->show_duration = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) } else if (!strcmp(var, "trace.show_arg_names")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) trace->show_arg_names = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (!trace->show_arg_names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) trace->show_zeros = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) } else if (!strcmp(var, "trace.show_zeros")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) bool new_show_zeros = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) if (!trace->show_arg_names && !new_show_zeros) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) trace->show_zeros = new_show_zeros;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) } else if (!strcmp(var, "trace.show_prefix")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) trace->show_string_prefix = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) } else if (!strcmp(var, "trace.no_inherit")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) trace->opts.no_inherit = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) } else if (!strcmp(var, "trace.args_alignment")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) int args_alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) if (perf_config_int(&args_alignment, var, value) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) trace->args_alignment = args_alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) if (strcasecmp(value, "libtraceevent") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) trace->libtraceevent_print = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) else if (strcasecmp(value, "libbeauty") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) trace->libtraceevent_print = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712)
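/*
 * Entry point for 'perf trace'. Output goes to stderr by default, with
 * timestamps, durations and argument names shown; the UINT_MAX/ULLONG_MAX
 * initializers act as "not set by the user" sentinels, see e.g. the
 * mmap_pages and max_stack checks further down.
 */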
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) int cmd_trace(int argc, const char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) const char *trace_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) "perf trace [<options>] [<command>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) "perf trace [<options>] -- <command> [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) "perf trace record [<options>] [<command>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) "perf trace record [<options>] -- <command> [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) struct trace trace = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) .opts = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) .target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) .uid = UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) .uses_mmap = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) .user_freq = UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) .user_interval = ULLONG_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) .no_buffering = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) .mmap_pages = UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) .output = stderr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) .show_comm = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) .show_tstamp = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) .show_duration = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) .show_arg_names = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) .args_alignment = 70,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) .trace_syscalls = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) .kernel_syscallchains = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) .max_stack = UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) .max_events = ULONG_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) const char *map_dump_str = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) const char *output_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) const struct option trace_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) OPT_CALLBACK('e', "event", &trace, "event",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) "event/syscall selector. use 'perf list' to list available events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) trace__parse_events_option),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) "event filter", parse_filter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) OPT_BOOLEAN(0, "comm", &trace.show_comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) "show the thread COMM next to its id"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) trace__parse_events_option),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) OPT_STRING('o', "output", &output_name, "file", "output file name"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) "trace events on existing process id"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) "trace events on existing thread id"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) "system-wide collection from all CPUs"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) "list of cpus to monitor"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) "child tasks do not inherit counters"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) "number of mmap data pages",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) perf_evlist__parse_mmap_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) "user to profile"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) OPT_CALLBACK(0, "duration", &trace, "float",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) "show only events with duration > N.M ms",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) trace__set_duration),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) #ifdef HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) OPT_INCR('v', "verbose", &verbose, "be more verbose"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) OPT_BOOLEAN('T', "time", &trace.full_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) "Show full timestamp, not time relative to first start"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) OPT_BOOLEAN(0, "failure", &trace.failure_only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) "Show only syscalls that failed"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) OPT_BOOLEAN('s', "summary", &trace.summary_only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) "Show only syscall summary with statistics"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) OPT_BOOLEAN('S', "with-summary", &trace.summary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) "Show all syscalls and summary with statistics"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) "Show errno stats per syscall, use with -s or -S"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) "Trace pagefaults", parse_pagefaults, "maj"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) OPT_CALLBACK(0, "call-graph", &trace.opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) "record_mode[,record_size]", record_callchain_help,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) &record_parse_callchain_opt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) "Use libtraceevent to print the tracepoint arguments."),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) "Show the kernel callchains on the syscall exit path"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) OPT_ULONG(0, "max-events", &trace.max_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) "Set the maximum number of events to print, exit after that is reached. "),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) OPT_UINTEGER(0, "min-stack", &trace.min_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) "Set the minimum stack depth when parsing the callchain, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) "anything below the specified depth will be ignored."),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) OPT_UINTEGER(0, "max-stack", &trace.max_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) "Set the maximum stack depth when parsing the callchain, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) "anything beyond the specified depth will be ignored. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) "Sort batch of events before processing, use if getting out of order events"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) "per thread proc mmap processing timeout in ms"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) trace__parse_cgroups),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) "ms to wait before starting measurement after program "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) "start"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) OPTS_EVSWITCH(&trace.evswitch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) OPT_END()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) bool __maybe_unused max_stack_user_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) bool mmap_pages_user_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) const char * const trace_subcommands[] = { "record", NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) char bf[BUFSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) signal(SIGSEGV, sighandler_dump_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) signal(SIGFPE, sighandler_dump_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) trace.evlist = evlist__new();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) trace.sctbl = syscalltbl__new();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) if (trace.evlist == NULL || trace.sctbl == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) pr_err("Not enough memory to run!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) * Parsing .perfconfig may entail creating a BPF event that may need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) * is too small. This affects just this process, not touching the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) * global setting. If it fails we'll get something in 'perf trace -v'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) * to help diagnose the problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) rlimit__bump_memlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) err = perf_config(trace__config, &trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) * Here we have already passed through trace__parse_events_option() and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) * has already figured out if -e syscall_name was used; if it wasn't, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) * --event foo:bar was, then the user is interested _just_ in those, say,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) * tracepoint events, not in the strace-like syscall-name-based mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) * This is important because we need to check if strace-like mode is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) * needed to decide whether we should filter out the eBPF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) * __augmented_syscalls__ code, if it is in the mix, say, via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) * .perfconfig trace.add_events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) if (!trace.trace_syscalls && !trace.trace_pgfaults &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) trace.evlist->core.nr_entries == 0 /* Was --event used? */) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) trace.trace_syscalls = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) * Now that we have --verbose figured out, let's see if we need to parse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) * events from .perfconfig, so that if those events fail parsing, say some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) * BPF program fails, then we'll be able to use --verbose to see what went
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) * wrong in more detail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) if (trace.perfconfig_events != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) struct parse_events_error parse_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) bzero(&parse_err, sizeof(parse_err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) parse_events_print_error(&parse_err, trace.perfconfig_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) usage_with_options_msg(trace_usage, trace_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) "cgroup monitoring only available in system-wide mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899)
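/*
 * Look for the "__augmented_syscalls__" BPF_OUTPUT event; it will only be
 * there if one of the BPF objects added (e.g. via -e or .perfconfig
 * trace.add_events) provides it. A NULL evsel simply means there is no
 * syscall augmenter in the mix.
 */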
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) if (IS_ERR(evsel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) if (evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) trace.syscalls.events.augmented = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) if (evsel == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) if (evsel->bpf_obj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) trace.bpf_obj = evsel->bpf_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) * If we have _just_ the augmenter event but don't have an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) * explicit --syscalls, then assume we want all strace-like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) * syscalls:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) trace.trace_syscalls = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) * So, if we have a syscall augmenter, but trace_syscalls, a.k.a.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) * strace-like syscall tracing, is not set, then we need to throw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) * away the augmenter, i.e. all the events that were created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) * from that BPF object file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) * This is more to fix the current .perfconfig trace.add_events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) * style of setting up the strace-like eBPF based syscall point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) * payload augmenter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) * All this complexity will be avoided by adding an alternative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) * to trace.add_events in the form of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) * trace.bpf_augmented_syscalls, that will be only parsed if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) * need it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) * .perfconfig trace.add_events is still useful if we want, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) * instance, to have msr_write.msr in some .perfconfig-profile-based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) * 'perf trace --config determinism.profile' mode, where for some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) * particular goal/workload type we want a set of events and an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) * output mode (with timings, etc.) instead of having to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) * them all via the command line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) * Also --config to specify an alternate .perfconfig file needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) * to be implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) if (!trace.trace_syscalls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) trace__delete_augmented_syscalls(&trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) trace__set_bpf_map_filtered_pids(&trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) trace__set_bpf_map_syscalls(&trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963)
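/*
 * Hook up the generic "stdout" output channel for any BPF programs in the
 * evlist; on failure the BPF loader formats a more detailed reason into bf.
 */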
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) err = bpf__setup_stdout(trace.evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) if (map_dump_str) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) if (trace.dump.map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980)
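/*
 * For page fault events we also want the faulting address and a timestamp in
 * each sample, hence sample_address/sample_time.
 */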
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) if (trace.trace_pgfaults) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) trace.opts.sample_address = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) trace.opts.sample_time = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) if (trace.opts.mmap_pages == UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) mmap_pages_user_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988)
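/*
 * If the user didn't cap the callchain depth, follow the kernel's
 * kernel.perf_event_max_stack sysctl when tracing live, or the build time
 * PERF_MAX_STACK_DEPTH when replaying a perf.data file (-i).
 */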
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) if (trace.max_stack == UINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) max_stack_user_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) #ifdef HAVE_DWARF_UNWIND_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999)
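/*
 * Callchains, DWARF ones in particular, make each sample considerably
 * bigger, so if the user didn't pick a --mmap-pages value and we're running
 * as root, bump the ring buffer to 4 times the perf_event_mlock_kb quota,
 * in pages.
 */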
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) if (callchain_param.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) if (!mmap_pages_user_set && geteuid() == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) symbol_conf.use_callchain = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) if (trace.evlist->core.nr_entries > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) if (evlist__set_syscall_tp_fields(trace.evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) perror("failed to set syscalls:* tracepoint fields");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014)
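/*
 * --sort-events: queue samples in an ordered_events instance and deliver
 * them in timestamp order, copying each event as it is queued since the
 * mmap'ed ring buffer it points into may be recycled before delivery.
 */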
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) if (trace.sort_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) ordered_events__set_copy_on_queue(&trace.oe.data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) * If we are augmenting syscalls, then combine what we put in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) * __augmented_syscalls__ BPF map with what is in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) * We'll switch to look at two BPF maps, one for sys_enter and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) * other for sys_exit when we start augmenting the sys_exit paths with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) * buffers that are being copied from kernel to userspace, think 'read'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) * syscall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) if (trace.syscalls.events.augmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) evlist__for_each_entry(trace.evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) if (raw_syscalls_sys_exit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) trace.raw_augmented_syscalls = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) goto init_augmented_syscall_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) if (trace.syscalls.events.augmented->priv == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) strstr(evsel__name(evsel), "syscalls:sys_enter")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) struct evsel *augmented = trace.syscalls.events.augmented;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) evsel__init_augmented_syscall_tp_args(augmented))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) * Augmented is the __augmented_syscalls__ BPF_OUTPUT event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) * Above we made sure we can get from its payload the tp fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) * that we get from the syscalls:sys_enter tracefs format file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) augmented->handler = trace__sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) * Now we do the same for the *syscalls:sys_enter event so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) * if we handle it directly, i.e. if the BPF prog returns 0 so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) * as not to filter it, then we'll handle it just like we would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) * for the BPF_OUTPUT one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) evsel__init_augmented_syscall_tp_args(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) evsel->handler = trace__sys_enter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) struct syscall_tp *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) init_augmented_syscall_tp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) if (evsel__init_augmented_syscall_tp(evsel, evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) sc = __evsel__syscall_tp(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) * For now with BPF raw_augmented we hook into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) * raw_syscalls:sys_enter and there we get all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) * 6 syscall args plus the tracepoint common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) * fields and the syscall_nr (another long).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) * So we check if that is the case and, if so,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) * don't use just sc->args_size worth of data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) * but always the full raw_syscalls:sys_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) * payload, which has a fixed size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) * We'll revisit this later to pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) * sc->args_size to the BPF augmenter (now in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) * tools/perf/examples/bpf/augmented_raw_syscalls.c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) * so that it copies only what we need for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) * syscall, like what happens when we use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) * syscalls:sys_enter_NAME, so that we reduce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) * the kernel/userspace traffic to just what is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) * needed for each syscall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) */
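/* I.e. the common fields (sc->id.offset) + syscall_nr + 6 args, each a long. */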
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) if (trace.raw_augmented_syscalls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) evsel__init_augmented_syscall_tp_ret(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) evsel->handler = trace__sys_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) return trace__record(&trace, argc-1, &argv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) /* Using just --errno-summary will trigger --summary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) if (trace.errno_summary && !trace.summary && !trace.summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) trace.summary_only = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) /* summary_only implies summary option, but don't overwrite summary if set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) if (trace.summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) trace.summary = trace.summary_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) if (output_name != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) err = trace__open_output(&trace, output_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) perror("failed to create output file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) err = target__validate(&trace.opts.target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) target__strerror(&trace.opts.target, err, bf, sizeof(bf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) fprintf(trace.output, "%s", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) err = target__parse_uid(&trace.opts.target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) target__strerror(&trace.opts.target, err, bf, sizeof(bf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) fprintf(trace.output, "%s", bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133)
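/*
 * Neither a workload to run nor an existing target (pid/tid/cpu/uid) was
 * given, so default to tracing the whole system.
 */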
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) if (!argc && target__none(&trace.opts.target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) trace.opts.target.system_wide = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) if (input_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) err = trace__replay(&trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) err = trace__run(&trace, argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) out_close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) if (output_name != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) fclose(trace.output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) zfree(&trace.perfconfig_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) }