// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096
#define COMM_LEN	20
#define SYM_LEN		129
#define MAX_PID		1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;
	sem_t sleep_sem;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type type;
	int specific_wait;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 max_lat_start;
	u64 max_lat_end;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
	int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int *comp_cpus;
	bool comp;
	struct perf_thread_map *color_pids;
	const char *color_pids_str;
	struct perf_cpu_map *color_cpus;
	const char *color_cpus_str;
	struct perf_cpu_map *cpus;
	const char *cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char *sort_order;
	unsigned long nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t start_work_mutex;
	pthread_mutex_t work_done_wait_mutex;
	int profile_cpu;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	int max_cpu;
	u32 curr_pid[MAX_CPUS];
	struct thread *curr_thread[MAX_CPUS];
	char next_shortname1;
	char next_shortname2;
	unsigned int replay_repeat;
	unsigned long nr_run_events;
	unsigned long nr_sleep_events;
	unsigned long nr_wakeup_events;
	unsigned long nr_sleep_corrections;
	unsigned long nr_run_events_optimized;
	unsigned long targetless_wakeups;
	unsigned long multitarget_wakeups;
	unsigned long nr_runs;
	unsigned long nr_timestamps;
	unsigned long nr_unordered_timestamps;
	unsigned long nr_context_switch_bugs;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 run_measurement_overhead;
	u64 sleep_measurement_overhead;
	u64 start_time;
	u64 cpu_usage;
	u64 runavg_cpu_usage;
	u64 parent_cpu_usage;
	u64 runavg_parent_cpu_usage;
	u64 sum_runtime;
	u64 sum_fluct;
	u64 run_avg;
	u64 all_runtime;
	u64 all_count;
	u64 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool summary;
	bool summary_only;
	bool idle_hist;
	bool show_callchain;
	unsigned int max_stack;
	bool show_cpu_visual;
	bool show_wakeups;
	bool show_next;
	bool show_migrations;
	bool show_state;
	u64 skipped_samples;
	const char *time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	int last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime tr;
	struct thread *last_thread;
	struct rb_root_cached sorted_root;
	struct callchain_root callchain;
	struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

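/*
 * Monotonic wall-clock helper for the replay engine: returns the
 * current CLOCK_MONOTONIC time in nanoseconds.
 */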
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

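/*
 * Busy-loop for @nsecs, ending the spin one run_measurement_overhead
 * early so that the cost of the get_nsecs() calls themselves does not
 * inflate replayed RUN atoms.
 */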
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

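/*
 * Calibration: run a zero-length burn (resp. a 10 usec sleep) ten
 * times and keep the minimum observed delta - the cheapest observed
 * round trip - as the measurement overhead to compensate for during
 * replay.
 */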
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

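/*
 * Append a new atom to @task's event array; the array is grown by one
 * pointer via realloc() for every recorded event.
 */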
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	BUG_ON(!event);
	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

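/*
 * A wakeup is paired with the wakee's last SLEEP atom through a shared
 * semaphore: during replay the waker will sem_post() what the sleeper
 * sem_wait()s on. Wakeups with no sleeping target, or whose target
 * already has a waker, are only counted and otherwise ignored.
 */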
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

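/*
 * Look up (or create) the task_desc for a pid. pid_to_task is sized
 * from kernel/pid_max (falling back to MAX_PID) on first use and grown
 * on demand for pids beyond the current bound.
 */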
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	BUG_ON(!task);
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

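/*
 * Chain all tasks into a ring of wakeups (task i wakes task i+1, the
 * last task wakes the first) so that every replay thread has at least
 * one waker.
 */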
static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

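/*
 * Replay a single atom: burn CPU for RUN, block on the wait semaphore
 * for SLEEP, post the wakee's semaphore for WAKEUP; MIGRATION is a
 * no-op here.
 */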
static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

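/*
 * Open a per-thread PERF_COUNT_SW_TASK_CLOCK counter so each replay
 * thread can read back its own CPU time in nanoseconds. One fd is
 * needed per task, so a large replay can hit EMFILE; with -f (force)
 * the RLIMIT_NOFILE soft limit is raised and the open retried.
 */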
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE] = "";
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

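/* Read the task-clock counter: CPU time consumed, in nanoseconds. */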
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc *task;
	struct perf_sched *sched;
	int fd;
};

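/*
 * Body of every replay thread: post ready_for_work, wait for the
 * parent to release start_work_mutex, replay all atoms while measuring
 * CPU usage through the task-clock fd, post work_done_sem, then block
 * on work_done_wait_mutex until the next iteration.
 */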
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

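/*
 * Spawn one thread per recorded task. Both handshake mutexes are
 * locked up front so the new threads park right after startup and only
 * start replaying once wait_for_tasks() releases them.
 */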
static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

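/*
 * Drive one replay iteration: wait for every thread to report ready,
 * release them via start_work_mutex, collect per-task CPU usage once
 * they post work_done_sem, and fold the totals into running averages
 * weighted over replay_repeat runs.
 */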
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

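/*
 * One benchmark run: time wait_for_tasks() wall-clock, accumulate the
 * absolute fluctuation against the mean, and print per-run and
 * running-average figures in milliseconds.
 */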
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3lu: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%lu sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

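/*
 * Replay-mode tracepoint handlers: rebuild the recorded wakeup/switch
 * stream as sched atoms on the synthetic tasks registered above.
 */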
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) replay_wakeup_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct evsel *evsel, struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) const char *comm = evsel__strval(evsel, sample, "comm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) const u32 pid = evsel__intval(evsel, sample, "pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct task_desc *waker, *wakee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (verbose > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) printf("sched_wakeup event %p\n", evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) waker = register_pid(sched, sample->tid, "<unknown>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) wakee = register_pid(sched, pid, comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) add_sched_event_wakeup(sched, waker, sample->time, wakee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
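/*
 * Replay: convert a sched_switch tracepoint into a run atom for the
 * task being switched out (covering the time since the last switch
 * seen on this CPU) and a sleep atom carrying its sched-out state.
 */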
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static int replay_switch_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) *next_comm = evsel__strval(evsel, sample, "next_comm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) next_pid = evsel__intval(evsel, sample, "next_pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct task_desc *prev, __maybe_unused *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) u64 timestamp0, timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int cpu = sample->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (verbose > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) printf("sched_switch event %p\n", evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (cpu >= MAX_CPUS || cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) timestamp0 = sched->cpu_last_switched[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (timestamp0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) delta = timestamp - timestamp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRId64 " nsecs]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) prev_comm, prev_pid, next_comm, next_pid, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) prev = register_pid(sched, prev_pid, prev_comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) next = register_pid(sched, next_pid, next_comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sched->cpu_last_switched[cpu] = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) add_sched_event_run(sched, prev, timestamp, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) add_sched_event_sleep(sched, prev, timestamp, prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
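/*
 * Replay: make sure both sides of a fork are registered, so that
 * later events on either the parent or the child can be attributed
 * to a replay task.
 */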
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static int replay_fork_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct thread *child, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) child = machine__findnew_thread(machine, event->fork.pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) event->fork.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) parent = machine__findnew_thread(machine, event->fork.ppid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) event->fork.ptid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (child == NULL || parent == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_debug("thread does not exist on fork event: child %p, parent %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) child, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (verbose > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) printf("fork event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) register_pid(sched, parent->tid, thread__comm_str(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) register_pid(sched, child->tid, thread__comm_str(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) thread__put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) thread__put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct sort_dimension {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sort_fn_t cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * handle runtime stats saved per thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static struct thread_runtime *thread__init_runtime(struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct thread_runtime *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) r = zalloc(sizeof(struct thread_runtime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) init_stats(&r->run_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) thread__set_priv(thread, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static struct thread_runtime *thread__get_runtime(struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct thread_runtime *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) tr = thread__priv(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (tr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) tr = thread__init_runtime(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (tr == NULL)
			pr_debug("Failed to allocate memory for runtime data.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
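/*
 * Compare two work_atoms entries by each configured sort dimension
 * in turn; the first dimension that differs decides the ordering.
 */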
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct sort_dimension *sort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) BUG_ON(list_empty(list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) list_for_each_entry(sort, list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ret = sort->cmp(l, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
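/*
 * Find a thread's work_atoms entry in the latency rbtree, walking it
 * with the same comparison chain that was used to order it.
 */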
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static struct work_atoms *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct list_head *sort_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct rb_node *node = root->rb_root.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct work_atoms key = { .thread = thread };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct work_atoms *atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) atoms = container_of(node, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) cmp = thread_lat_cmp(sort_list, &key, atoms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (cmp > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) else if (cmp < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) BUG_ON(thread != atoms->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
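/*
 * Insert a work_atoms entry into an rbtree ordered by the given sort
 * dimensions, keeping the cached leftmost node up to date.
 */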
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct list_head *sort_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) bool leftmost = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct work_atoms *this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) this = container_of(*new, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) cmp = thread_lat_cmp(sort_list, data, this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (cmp > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) leftmost = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) rb_link_node(&data->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rb_insert_color_cached(&data->node, root, leftmost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct work_atoms *atoms = zalloc(sizeof(*atoms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) pr_err("No memory at %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) atoms->thread = thread__get(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) INIT_LIST_HEAD(&atoms->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
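/*
 * Map a prev_state value to its one-letter code ('R', 'S', 'D', ...).
 * Note that this indexes TASK_STATE_TO_CHAR_STR directly and so
 * assumes prev_state stays within the bounds of that string.
 */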
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static char sched_out_state(u64 prev_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) const char *str = TASK_STATE_TO_CHAR_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return str[prev_state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
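/*
 * Open a new wait atom at sched-out time. A task that was still
 * runnable ('R') is waiting for a CPU from this very moment, so its
 * wakeup time is the sched-out time itself.
 */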
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) add_sched_out_event(struct work_atoms *atoms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) char run_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) u64 timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct work_atom *atom = zalloc(sizeof(*atom));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (!atom) {
		pr_err("No memory at %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) atom->sched_out_time = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (run_state == 'R') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) atom->state = THREAD_WAIT_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) atom->wake_up_time = atom->sched_out_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) list_add_tail(&atom->list, &atoms->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
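/*
 * Account a runtime delta to the thread's most recent atom and to
 * its accumulated total runtime.
 */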
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) add_runtime_event(struct work_atoms *atoms, u64 delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) u64 timestamp __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct work_atom *atom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) BUG_ON(list_empty(&atoms->work_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) atom = list_entry(atoms->work_list.prev, struct work_atom, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) atom->runtime += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) atoms->total_runtime += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
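/*
 * Close the most recent wait atom at sched-in time: the span from
 * wakeup to sched-in is the scheduling latency being measured, and
 * the start/end of the worst latency seen so far are remembered.
 */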
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct work_atom *atom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) u64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (list_empty(&atoms->work_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) atom = list_entry(atoms->work_list.prev, struct work_atom, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (atom->state != THREAD_WAIT_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (timestamp < atom->wake_up_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) atom->state = THREAD_IGNORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) atom->state = THREAD_SCHED_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) atom->sched_in_time = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) delta = atom->sched_in_time - atom->wake_up_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) atoms->total_lat += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (delta > atoms->max_lat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) atoms->max_lat = delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) atoms->max_lat_start = atom->wake_up_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) atoms->max_lat_end = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) atoms->nb_atoms++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
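/*
 * Latency: on a context switch, open a wait atom for the task going
 * out and close the pending one for the task coming in, creating the
 * per-thread atom lists on first sight of either task.
 */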
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int latency_switch_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) next_pid = evsel__intval(evsel, sample, "next_pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct work_atoms *out_events, *in_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct thread *sched_out, *sched_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) u64 timestamp0, timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) int cpu = sample->cpu, err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) BUG_ON(cpu >= MAX_CPUS || cpu < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) timestamp0 = sched->cpu_last_switched[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) sched->cpu_last_switched[cpu] = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (timestamp0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) delta = timestamp - timestamp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) sched_out = machine__findnew_thread(machine, -1, prev_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sched_in = machine__findnew_thread(machine, -1, next_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (sched_out == NULL || sched_in == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!out_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (thread_atoms_insert(sched, sched_out))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!out_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) pr_err("out-event: Internal tree error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!in_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (thread_atoms_insert(sched, sched_in))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (!in_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pr_err("in-event: Internal tree error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
		/*
		 * A task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (add_sched_out_event(in_events, 'R', timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) add_sched_in_event(in_events, timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) thread__put(sched_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) thread__put(sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
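/*
 * Latency: fold a sched_stat_runtime sample into the thread's atom
 * list, creating the entry (with an initial runnable atom) if the
 * thread has not been seen before.
 */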
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int latency_runtime_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) const u32 pid = evsel__intval(evsel, sample, "pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms;
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	/*
	 * Only search the atom tree once the thread lookup is known to
	 * have succeeded, since the comparators dereference the thread:
	 */
	atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) BUG_ON(cpu >= MAX_CPUS || cpu < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (thread_atoms_insert(sched, thread))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) pr_err("in-event: Internal tree error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (add_sched_out_event(atoms, 'R', timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) add_runtime_event(atoms, runtime, timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
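/*
 * Latency: mark the woken task as waiting for a CPU from this
 * timestamp onwards, unless the wakeup is redundant or arrives out
 * of order.
 */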
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static int latency_wakeup_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) const u32 pid = evsel__intval(evsel, sample, "pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct work_atoms *atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct work_atom *atom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct thread *wakee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) u64 timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) wakee = machine__findnew_thread(machine, -1, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (wakee == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (thread_atoms_insert(sched, wakee))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) pr_err("wakeup-event: Internal tree error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (add_sched_out_event(atoms, 'S', timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) BUG_ON(list_empty(&atoms->work_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) atom = list_entry(atoms->work_list.prev, struct work_atom, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
	/*
	 * A wakeup event is not guaranteed to happen while the task is
	 * off the run queue: it can also fire for a task that is already
	 * runnable and merely flips ->state back to TASK_RUNNING. In that
	 * case, do not update ->wake_up_time, or the wait of a task that
	 * never left the run queue would be mismeasured.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * skip in this case.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) goto out_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) sched->nr_timestamps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (atom->sched_out_time > timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) sched->nr_unordered_timestamps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) goto out_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) atom->state = THREAD_WAIT_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) atom->wake_up_time = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) out_ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) thread__put(wakee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
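/*
 * Latency: migrations only matter when a single CPU is being
 * profiled, since a task migrated onto it has no local wakeup
 * history; stamp its latest atom with the migration time so no
 * bogus latency is accounted.
 */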
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static int latency_migrate_task_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) const u32 pid = evsel__intval(evsel, sample, "pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) u64 timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct work_atoms *atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct work_atom *atom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct thread *migrant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * Only need to worry about migration when profiling one CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (sched->profile_cpu == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) migrant = machine__findnew_thread(machine, -1, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (migrant == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (thread_atoms_insert(sched, migrant))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) register_pid(sched, migrant->tid, thread__comm_str(migrant));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!atoms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pr_err("migration-event: Internal tree error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (add_sched_out_event(atoms, 'R', timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) BUG_ON(list_empty(&atoms->work_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) atom = list_entry(atoms->work_list.prev, struct work_atom, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) sched->nr_timestamps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (atom->sched_out_time > timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sched->nr_unordered_timestamps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) thread__put(migrant);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
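/*
 * Print one row of the latency table for a thread (or a set of
 * merged threads), roughly of the form:
 *
 *   foo:1234  |  12.345 ms |  42 | avg: 0.123 ms | max: 1.234 ms | ...
 *
 * (illustrative spacing only). Idle (swapper) threads are skipped;
 * runtime and switch counts also feed the grand totals printed at
 * the end.
 */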
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) u64 avg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) char max_lat_start[32], max_lat_end[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!work_list->nb_atoms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * Ignore idle threads:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) sched->all_runtime += work_list->total_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) sched->all_count += work_list->nb_atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (work_list->num_merged > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) for (i = 0; i < 24 - ret; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) avg = work_list->total_lat / work_list->nb_atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) (double)work_list->total_runtime / NSEC_PER_MSEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) (double)work_list->max_lat / NSEC_PER_MSEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) max_lat_start, max_lat_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (l->thread == r->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (l->thread->tid < r->thread->tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (l->thread->tid > r->thread->tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return (int)(l->thread - r->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) u64 avgl, avgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!l->nb_atoms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!r->nb_atoms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) avgl = l->total_lat / l->nb_atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) avgr = r->total_lat / r->nb_atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (avgl < avgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (avgl > avgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static int max_cmp(struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (l->max_lat < r->max_lat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (l->max_lat > r->max_lat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (l->nb_atoms < r->nb_atoms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (l->nb_atoms > r->nb_atoms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (l->total_runtime < r->total_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (l->total_runtime > r->total_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
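/*
 * Translate a sort key token ("pid", "avg", "max", "switch" or
 * "runtime") into its comparison function and append it to the sort
 * dimension list; unknown tokens are rejected.
 */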
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static int sort_dimension__add(const char *tok, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static struct sort_dimension avg_sort_dimension = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) .name = "avg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) .cmp = avg_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static struct sort_dimension max_sort_dimension = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) .name = "max",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) .cmp = max_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static struct sort_dimension pid_sort_dimension = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) .name = "pid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) .cmp = pid_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static struct sort_dimension runtime_sort_dimension = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) .name = "runtime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) .cmp = runtime_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static struct sort_dimension switch_sort_dimension = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .name = "switch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .cmp = switch_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct sort_dimension *available_sorts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) &pid_sort_dimension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) &avg_sort_dimension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) &max_sort_dimension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) &switch_sort_dimension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) &runtime_sort_dimension,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (!strcmp(available_sorts[i]->name, tok)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) list_add_tail(&available_sorts[i]->list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
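/*
 * Drain the raw atom tree (and then the merged one) into
 * sorted_atom_root, re-inserting every entry under the user-selected
 * sort dimensions.
 */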
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static void perf_sched__sort_lat(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct rb_root_cached *root = &sched->atom_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct work_atoms *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) node = rb_first_cached(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) rb_erase_cached(node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) data = rb_entry(node, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (root == &sched->atom_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) root = &sched->merged_atom_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
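/*
 * Tracepoint glue: route sched_wakeup samples to whichever handler
 * set (replay, latency or map) is installed in tp_handler.
 */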
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int process_sched_wakeup_event(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (sched->tp_handler->wakeup_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
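/*
 * Map mode stores a per-thread "has color" flag directly in the
 * thread's priv pointer; this union makes the pointer <-> bool pun
 * explicit.
 */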
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) union map_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) bool color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static bool thread__has_color(struct thread *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) union map_priv priv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) .ptr = thread__priv(thread),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return priv.color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static struct thread*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct thread *thread = machine__findnew_thread(machine, pid, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) union map_priv priv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) .color = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!sched->map.color_pids || !thread || thread__priv(thread))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (thread_map__has(sched->map.color_pids, tid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) priv.color = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) thread__set_priv(thread, priv.ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
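/*
 * Map mode: print one line per context switch, one column per CPU,
 * each column showing the two-character shortname of the task now
 * running there ('.' means idle, '*' marks the CPU that switched),
 * followed by the timestamp and, when a task is new or renamed, a
 * "shortname => comm:pid" legend.
 */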
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct perf_sample *sample, struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct thread *sched_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct thread_runtime *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int new_shortname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) u64 timestamp0, timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int i, this_cpu = sample->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int cpus_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) bool new_cpu = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) const char *color = PERF_COLOR_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) char stimestamp[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (this_cpu > sched->max_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) sched->max_cpu = this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (sched->map.comp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) sched->map.comp_cpus[cpus_nr++] = this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) new_cpu = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cpus_nr = sched->max_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) timestamp0 = sched->cpu_last_switched[this_cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) sched->cpu_last_switched[this_cpu] = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (timestamp0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) delta = timestamp - timestamp0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) sched_in = map__findnew_thread(sched, machine, -1, next_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (sched_in == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) tr = thread__get_runtime(sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (tr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) thread__put(sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) sched->curr_thread[this_cpu] = thread__get(sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) new_shortname = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!tr->shortname[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (!strcmp(thread__comm_str(sched_in), "swapper")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * Don't allocate a letter-number for swapper:0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * as a shortname. Instead, we use '.' for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) tr->shortname[0] = '.';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) tr->shortname[1] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) } else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
				return -1;
			}
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, "   ");
	}

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}

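/*
 * Track which task is current on each cpu so that out-of-order
 * sched_switch samples (prev_pid not matching the task we last saw
 * running there) can be counted in nr_context_switch_bugs.
 */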
static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

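/*
 * Generic PERF_RECORD_SAMPLE callback: dispatch the sample to the
 * tracepoint handler attached to this evsel (e.g. via
 * perf_session__set_tracepoints_handlers()), if any.
 */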
static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

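/*
 * COMM events rename a thread; flag the change so the map view knows
 * to print the new task name on its next line for this thread.
 */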
static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	struct thread *thread;
	struct thread_runtime *tr;
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	if (err)
		return err;

	thread = machine__find_thread(machine, sample->pid, sample->tid);
	if (!thread) {
		pr_err("Internal error: can't find thread\n");
		return -1;
	}

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		thread__put(thread);
		return -1;
	}

	tr->comm_changed = true;
	thread__put(thread);

	return 0;
}

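/*
 * Open the recorded perf.data file, attach the sched tracepoint
 * handlers above and replay all events through them, saving event
 * and lost-event counts for the summary.
 */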
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&data, false, &sched->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}

/*
 * scheduling times are printed as msec.usec
 */
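/* e.g. print_sched_time(12345678, 6) prints "    12.345 " */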
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}

/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}

/*
 * save last time event was seen per cpu
 */
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}

static int comm_width = 30;

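/*
 * Format "comm[tid/pid]" into a static buffer, widening comm_width if
 * needed so later columns stay aligned. The returned pointer is only
 * valid until the next call.
 */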
static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}

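/*
 * Print the three header rows for 'perf sched timehist': column names,
 * units and a dotted separator. The cpu-visual block is one column per
 * cpu, labelled 0-f and repeating.
 */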
static void timehist_header(struct perf_sched *sched)
{
	u32 ncpus = sched->max_cpu + 1;
	u32 i, j;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		printf(" ");
		for (i = 0, j = 0; i < ncpus; ++i) {
			printf("%x", j++);
			if (j > 15)
				j = 0;
		}
		printf(" ");
	}

	printf(" %-*s %9s %9s %9s", comm_width,
		"task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf(" %s", "state");

	printf("\n");

	/*
	 * units row
	 */
	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s %9s %9s %9s", comm_width,
		"[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)
		printf(" %5s", "");

	printf("\n");

	/*
	 * separator
	 */
	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s %.9s %.9s %.9s", comm_width,
		graph_dotted_line, graph_dotted_line, graph_dotted_line,
		graph_dotted_line);

	if (sched->show_state)
		printf(" %.5s", graph_dotted_line);

	printf("\n");
}

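/*
 * Map the lowest set state bit to its character code from
 * TASK_STATE_TO_CHAR_STR (e.g. 'R', 'S', 'D'); idle (tid 0) is
 * special-cased as 'I' and unknown states print '?'.
 */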
static char task_state_char(struct thread *thread, int state)
{
	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
	unsigned bit = state ? ffs(state) : 0;

	/* 'I' for idle */
	if (thread->tid == 0)
		return 'I';

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

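/*
 * Print one timehist line for the task being scheduled out: timestamp,
 * cpu, optional cpu visual, task name and the three delta-time columns,
 * plus optional state/next/callchain decorations.
 */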
static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t, int state)
{
	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu + 1;
	char tstr[64];
	char nstr[30];
	u64 wait_time;

	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
		return;

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';
			else
				c = ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", task_state_char(thread, state));

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);
	}

	if (sched->show_wakeups && !sched->show_next)
		printf(" %-*s", comm_width, "");

	if (thread->tid == 0)
		goto out;

	if (sched->show_callchain)
		printf(" ");

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, symbol_conf.bt_stop_list, stdout);

out:
	printf("\n");
}

/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e., time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *     last         ready        tprev         t
 *     time         to run
 *
 *      |-------- dt_wait --------|
 *                   |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
 */

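/*
 * Worked example (illustrative numbers, usec): last_time = 100,
 * ready_to_run = 120, tprev = 150 and t = 180 give dt_wait = 50
 * (150 - 100), dt_delay = 30 (150 - 120) and dt_run = 30 (180 - 150);
 * dt_wait is then accounted as dt_sleep, dt_iowait or dt_preempt
 * depending on the state the task was last scheduled out in.
 */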
static void timehist_update_runtime_stats(struct thread_runtime *r,
					  u64 t, u64 tprev)
{
	r->dt_delay = 0;
	r->dt_sleep = 0;
	r->dt_iowait = 0;
	r->dt_preempt = 0;
	r->dt_run = 0;

	if (tprev) {
		r->dt_run = t - tprev;
		if (r->ready_to_run) {
			if (r->ready_to_run > tprev)
				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			else
				r->dt_delay = tprev - r->ready_to_run;
		}

		if (r->last_time > tprev)
			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
		else if (r->last_time) {
			u64 dt_wait = tprev - r->last_time;

			if (r->last_state == TASK_RUNNING)
				r->dt_preempt = dt_wait;
			else if (r->last_state == TASK_UNINTERRUPTIBLE)
				r->dt_iowait = dt_wait;
			else
				r->dt_sleep = dt_wait;
		}
	}

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;
}

static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)
{
	/* pid 0 == swapper == idle task */
	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
		return evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;
}

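/*
 * Resolve and cache the callchain for the task being scheduled out,
 * marking scheduler-internal entry points (schedule, __schedule,
 * preempt_schedule) as ignored so they can be skipped when printing.
 */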
static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
		return;
	}

	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
		if (verbose > 0)
			pr_err("Failed to resolve callchain. Skipping\n");

		return;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;
		struct symbol *sym;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->ms.sym;
		if (sym) {
			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}
}

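/*
 * Give a synthetic idle thread its comm and the per-thread runtime and
 * callchain state that the timehist code expects to find in priv.
 */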
static int init_idle_thread(struct thread *thread)
{
	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));
	if (itr == NULL)
		return -ENOMEM;

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);

	return 0;
}

/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i, ret;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		ret = init_idle_thread(idle_threads[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i < idle_max_cpu; ++i) {
		if (idle_threads[i])
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}

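/*
 * Return the idle thread struct for a cpu, growing the array (to the
 * next power of two) and allocating the thread on first use.
 */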
static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)
				return NULL;
		}
	}

	return idle_threads[cpu];
}

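/* snapshot the current callchain into the idle thread's private cursor */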
static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)
{
	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
}

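/*
 * Map a sample to its thread struct: the per-cpu idle thread for idle
 * samples, otherwise the (found or created) machine thread. For idle
 * histograms, also remember which task ran last on the cpu and copy
 * its callchain when switching into idle.
 */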
static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct evsel *evsel)
{
	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
		if (thread == NULL)
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

	} else {
		/* there were samples with tid 0 but non-zero pid */
		thread = machine__findnew_thread(machine, sample->pid,
						 sample->tid ?: sample->pid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
				 sample->tid);
		}

		save_task_callchain(sched, sample, evsel, machine);
		if (sched->idle_hist) {
			struct thread *idle;
			struct idle_thread_runtime *itr;

			idle = get_idle_thread(sample->cpu);
			if (idle == NULL) {
				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
				return NULL;
			}

			itr = thread__priv(idle);
			if (itr == NULL)
				return NULL;

			itr->last_thread = thread;

			/* copy task callchain when switching into idle */
			if (evsel__intval(evsel, sample, "next_pid") == 0)
				save_idle_callchain(sched, itr, sample);
		}
	}

	return thread;
}

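/*
 * Decide whether a sample should be skipped: either because the thread
 * is filtered out on the command line, or, in idle-histogram mode,
 * because the event is not a sched_switch involving the idle task.
 */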
static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	bool rc = false;

	if (thread__is_filtered(thread)) {
		rc = true;
		sched->skipped_samples++;
	}

	if (sched->idle_hist) {
		if (strcmp(evsel__name(evsel), "sched:sched_switch"))
			rc = true;
		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
			 evsel__intval(evsel, sample, "next_pid") != 0)
			rc = true;
	}

	return rc;
}

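/*
 * Print a wakeup line: timestamp, cpu, the waking task and
 * "awakened: <task>". Shown unless both threads are filtered.
 */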
static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)
{
	struct thread *thread;
	char tstr[64];

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, awakened, evsel, sample)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* dt spacer */
	printf(" %9s %9s %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

	printf("\n");
}

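/*
 * No-op wakeup handler, installed in place of
 * timehist_sched_wakeup_event when a wakeup tracepoint should not
 * contribute (e.g. sched_wakeup when sched_waking was also recorded).
 */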
static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
					union perf_event *event __maybe_unused,
					struct evsel *evsel __maybe_unused,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)
{
	return 0;
}

static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);

	return 0;
}

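/*
 * Print a migration line in the same layout as wakeups: timestamp,
 * cpu, task and "migrated: <task> cpu X => Y".
 */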
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static void timehist_print_migration_event(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) struct machine *machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct thread *migrated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) char tstr[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) u32 max_cpus = sched->max_cpu + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) u32 ocpu, dcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (sched->summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) max_cpus = sched->max_cpu + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) ocpu = evsel__intval(evsel, sample, "orig_cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) dcpu = evsel__intval(evsel, sample, "dest_cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) thread = machine__findnew_thread(machine, sample->pid, sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (thread == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (timehist_skip_sample(sched, thread, evsel, sample) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) timehist_skip_sample(sched, migrated, evsel, sample)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) printf("%15s [%04d] ", tstr, sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (sched->show_cpu_visual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) char c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) for (i = 0; i < max_cpus; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) c = (i == sample->cpu) ? 'm' : ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) printf("%c", c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) printf(" %-*s ", comm_width, timehist_get_commstr(thread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) /* dt spacer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) printf(" %9s %9s %9s ", "", "", "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) printf("migrated: %s", timehist_get_commstr(migrated));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) printf(" cpu %d => %d", ocpu, dcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
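/*
 * sched_migrate_task handler: look up the migrated task by the pid in
 * the tracepoint payload, bump its migration counter, and print the
 * migration line (a no-op in summary-only mode).
 */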
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static int timehist_migrate_task_event(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct thread_runtime *tr = NULL;
	/* we want the pid of the migrated task, not the pid in the sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) const u32 pid = evsel__intval(evsel, sample, "pid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) thread = machine__findnew_thread(machine, 0, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (thread == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) tr = thread__get_runtime(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) tr->migrations++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) /* show migrations if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) timehist_print_migration_event(sched, evsel, sample, machine, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
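/*
 * Workhorse for sched_switch samples: resolve the thread being switched
 * out, clamp the [tprev, t] interval to the user-requested time window,
 * fold the interval into the thread's runtime stats (and, in idle_hist
 * mode, into the stats of the thread that preceded the idle task), print
 * the per-event line unless summary-only, and record this sample's time
 * as the CPU's last-seen timestamp for the next interval.
 */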
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) static int timehist_sched_change_event(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) struct perf_time_interval *ptime = &sched->ptime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) struct addr_location al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) struct thread_runtime *tr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u64 tprev, t = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) int state = evsel__intval(evsel, sample, "prev_state");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (machine__resolve(machine, &al, sample) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) pr_err("problem processing %d event. skipping it\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) thread = timehist_get_thread(sched, sample, machine, evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (thread == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (timehist_skip_sample(sched, thread, evsel, sample))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) tr = thread__get_runtime(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (tr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) tprev = evsel__get_time(evsel, sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * If start time given:
	 * - sample time is before the window the user cares about: skip the sample
	 * - tprev is before the window: clamp it to the start of the window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (ptime->start && ptime->start > t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (tprev && ptime->start > tprev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) tprev = ptime->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * If end time given:
	 * - previous sched event is already past the window: we are done
	 * - sample time is beyond the window: clamp it to the window end so
	 *   the stats for the time window of interest are closed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (ptime->end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (tprev > ptime->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (t > ptime->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) t = ptime->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (!sched->idle_hist || thread->tid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) timehist_update_runtime_stats(tr, t, tprev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
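		/*
		 * In idle_hist mode this is a switch away from the idle task;
		 * charge the just-ended idle interval to the thread that ran
		 * before the CPU went idle (itr->last_thread, presumably
		 * recorded when that thread scheduled out) and fold its saved
		 * callchain weighted by the idle time, t - tprev.
		 */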
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (sched->idle_hist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) struct idle_thread_runtime *itr = (void *)tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) struct thread_runtime *last_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) BUG_ON(thread->tid != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (itr->last_thread == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) /* add current idle time as last thread's runtime */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) last_tr = thread__get_runtime(itr->last_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (last_tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) timehist_update_runtime_stats(last_tr, t, tprev);
			/*
			 * Clear the delta times of the last thread: they are
			 * not updated here and would otherwise show stale,
			 * invalid values the next time around. Only the total
			 * run time and the run stats matter for idle_hist.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) last_tr->dt_run = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) last_tr->dt_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) last_tr->dt_sleep = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) last_tr->dt_iowait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) last_tr->dt_preempt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (itr->cursor.nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) callchain_append(&itr->callchain, &itr->cursor, t - tprev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) itr->last_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (!sched->summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) out:
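	/*
	 * Track the first and last timestamps seen inside the requested
	 * window; their difference (hist_time) later serves as the
	 * denominator for the idle percentages in the summary.
	 */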
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (sched->hist_time.start == 0 && t >= ptime->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) sched->hist_time.start = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (ptime->end == 0 || t <= ptime->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) sched->hist_time.end = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) /* time of this sched_switch event becomes last time task seen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) tr->last_time = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) /* last state is used to determine where to account wait time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) tr->last_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) /* sched out event for task so reset ready to run time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) tr->ready_to_run = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) evsel__save_time(evsel, sample->time, sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) static int timehist_sched_switch_event(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct perf_sample *sample,
			     struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return timehist_sched_change_event(tool, event, evsel, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
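/* PERF_RECORD_LOST handler: report when and on which CPU events were dropped */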
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static int process_lost(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) char tstr[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) printf("%15s ", tstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
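/*
 * One summary line per task: comm, parent pid, sched-in count, total run
 * time, min/avg/max per-run times with the relative stddev in percent,
 * and the migration count.
 */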
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) static void print_thread_runtime(struct thread *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) struct thread_runtime *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) double mean = avg_stats(&r->run_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) float stddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) printf("%*s %5d %9" PRIu64 " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) comm_width, timehist_get_commstr(t), t->ppid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) (u64) r->run_stats.n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) print_sched_time(r->total_run_time, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) print_sched_time(r->run_stats.min, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) print_sched_time((u64) mean, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) print_sched_time(r->run_stats.max, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) printf("%5.2f", stddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) printf(" %5" PRIu64, r->migrations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) static void print_thread_waittime(struct thread *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) struct thread_runtime *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) printf("%*s %5d %9" PRIu64 " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) comm_width, timehist_get_commstr(t), t->ppid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) (u64) r->run_stats.n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) print_sched_time(r->total_run_time, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) print_sched_time(r->total_sleep_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) print_sched_time(r->total_iowait_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) print_sched_time(r->total_preempt_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) printf(" ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) print_sched_time(r->total_delay_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
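/*
 * Accumulator threaded through machine__for_each_thread() so the task
 * totals can be summed while each thread's summary line is printed.
 */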
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct total_run_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct perf_sched *sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) u64 sched_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) u64 task_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) u64 total_run_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) static int __show_thread_runtime(struct thread *t, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) struct total_run_stats *stats = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct thread_runtime *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (thread__is_filtered(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) r = thread__priv(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (r && r->run_stats.n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) stats->task_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) stats->sched_count += r->run_stats.n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) stats->total_run_time += r->total_run_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (stats->sched->show_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) print_thread_waittime(t, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) print_thread_runtime(t, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) static int show_thread_runtime(struct thread *t, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (t->dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) return __show_thread_runtime(t, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) static int show_deadthread_runtime(struct thread *t, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) if (!t->dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) return __show_thread_runtime(t, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
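/*
 * Print a callchain in folded form: recurse to the parent node first so
 * the chain reads root to leaf, join frames with " <- ", and skip
 * context markers and ignored symbols. Returns the number of characters
 * written.
 */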
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) const char *sep = " <- ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct callchain_list *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) size_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) char bf[1024];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) bool first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) if (node == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) ret = callchain__fprintf_folded(fp, node->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) first = (ret == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) list_for_each_entry(chain, &node->val, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (chain->ip >= PERF_CONTEXT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (chain->ms.sym && chain->ms.sym->ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) ret += fprintf(fp, "%s%s", first ? "" : sep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) callchain_list__sym_name(chain, bf, sizeof(bf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) size_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) FILE *fp = stdout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) struct callchain_node *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct rb_node *rb_node = rb_first_cached(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) graph_dotted_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) while (rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) chain = rb_entry(rb_node, struct callchain_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) rb_node = rb_next(rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) ret += fprintf(fp, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) print_sched_time(chain->hit, 12);
		ret += 16; /* print_sched_time() prints its 2nd arg + 4 chars */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ret += fprintf(fp, " %8d ", chain->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) ret += callchain__fprintf_folded(fp, chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) ret += fprintf(fp, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
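/*
 * End-of-run summary: a per-task table (runtime, wait-time, or idle-time
 * depending on the mode), terminated tasks, per-CPU idle stats, optional
 * idle callchains, and the overall task/context-switch/run-time totals.
 */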
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) static void timehist_print_summary(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) struct machine *m = &session->machines.host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) struct total_run_stats totals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) u64 task_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) struct thread *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct thread_runtime *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) u64 hist_time = sched->hist_time.end - sched->hist_time.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) memset(&totals, 0, sizeof(totals));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) totals.sched = sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (sched->idle_hist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) printf("\nIdle-time summary\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) printf("%*s parent sched-out ", comm_width, "comm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) } else if (sched->show_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) printf("\nWait-time summary\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) printf("%*s parent sched-in ", comm_width, "comm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) printf(" run-time sleep iowait preempt delay\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) printf("\nRuntime summary\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) printf("%*s parent sched-in ", comm_width, "comm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) printf(" run-time min-run avg-run max-run stddev migrations\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) printf("%*s (count) ", comm_width, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) printf(" (msec) (msec) (msec) (msec) %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) sched->show_state ? "(msec)" : "%");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) printf("%.117s\n", graph_dotted_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) machine__for_each_thread(m, show_thread_runtime, &totals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) task_count = totals.task_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (!task_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) printf("<no still running tasks>\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) printf("\nTerminated tasks:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) machine__for_each_thread(m, show_deadthread_runtime, &totals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (task_count == totals.task_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) printf("<no terminated tasks>\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) /* CPU idle stats not tracked when samples were skipped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if (sched->skipped_samples && !sched->idle_hist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) printf("\nIdle stats:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) for (i = 0; i < idle_max_cpu; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (cpu_list && !test_bit(i, cpu_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) t = idle_threads[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) r = thread__priv(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (r && r->run_stats.n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) totals.sched_count += r->run_stats.n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) printf(" CPU %2d idle for ", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) print_sched_time(r->total_run_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
		} else {
			printf(" CPU %2d idle entire time window\n", i);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (sched->idle_hist && sched->show_callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) callchain_param.mode = CHAIN_FOLDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) callchain_param.value = CCVAL_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) callchain_register_param(&callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) printf("\nIdle stats by callchain:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) for (i = 0; i < idle_max_cpu; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) struct idle_thread_runtime *itr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) t = idle_threads[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) itr = thread__priv(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (itr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 0, &callchain_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) printf(" CPU %2d:", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) print_sched_time(itr->tr.total_run_time, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) printf(" msec\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) timehist_print_idlehist_callchain(&itr->sorted_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) printf("\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) " Total number of unique tasks: %" PRIu64 "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) "Total number of context switches: %" PRIu64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) totals.task_count, totals.sched_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) printf(" Total run time (msec): ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) print_sched_time(totals.total_run_time, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) printf(" Total scheduling time (msec): ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) print_sched_time(hist_time, 2);
	printf(" (x %d)\n", sched->max_cpu + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) typedef int (*sched_handler)(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) struct machine *machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) static int perf_timehist__process_sample(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) int this_cpu = sample->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (this_cpu > sched->max_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) sched->max_cpu = this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) if (evsel->handler != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) sched_handler f = evsel->handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) err = f(tool, event, evsel, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
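/*
 * Make sure every event has its per-evsel runtime data allocated, and
 * quietly drop callchain handling if the samples were recorded without
 * callchains.
 */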
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) static int timehist_check_attr(struct perf_sched *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct evsel_runtime *er;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) list_for_each_entry(evsel, &evlist->core.entries, core.node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) er = evsel__get_runtime(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (er == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) pr_err("Failed to allocate memory for evsel runtime data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (sched->show_callchain && !evsel__has_callchain(evsel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) pr_info("Samples do not have callchains.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) sched->show_callchain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) symbol_conf.use_callchain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
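/*
 * Driver for the timehist mode: wire up the tool callbacks and
 * tracepoint handlers, open the perf.data session, process the events
 * in timestamp order, and print the summary if one was requested.
 */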
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) static int perf_sched__timehist(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) struct evsel_str_handler handlers[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) { "sched:sched_switch", timehist_sched_switch_event, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) { "sched:sched_wakeup", timehist_sched_wakeup_event, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) { "sched:sched_waking", timehist_sched_wakeup_event, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) const struct evsel_str_handler migrate_handlers[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) { "sched:sched_migrate_task", timehist_migrate_task_event, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) struct perf_data data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) .path = input_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) .mode = PERF_DATA_MODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) .force = sched->force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) struct evlist *evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) * event handlers for timehist option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) sched->tool.sample = perf_timehist__process_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) sched->tool.mmap = perf_event__process_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) sched->tool.comm = perf_event__process_comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) sched->tool.exit = perf_event__process_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) sched->tool.fork = perf_event__process_fork;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) sched->tool.lost = process_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) sched->tool.attr = perf_event__process_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) sched->tool.tracing_data = perf_event__process_tracing_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) sched->tool.build_id = perf_event__process_build_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) sched->tool.ordered_events = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) sched->tool.ordering_requires_timestamps = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) symbol_conf.use_callchain = sched->show_callchain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) session = perf_session__new(&data, false, &sched->tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (IS_ERR(session))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) return PTR_ERR(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (cpu_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) evlist = session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) symbol__init(&session->header.env);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		err = -EINVAL;
		goto out;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) if (timehist_check_attr(sched, evlist) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) setup_pager();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
	/*
	 * Prefer sched_waking if it was captured: it is emitted earlier in
	 * the wakeup path than sched_wakeup, so when both are present handle
	 * only sched_waking and ignore sched_wakeup to avoid counting the
	 * same wakeup twice.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) "sched:sched_waking"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) handlers[1].handler = timehist_sched_wakeup_ignore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) /* setup per-evsel handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (perf_session__set_tracepoints_handlers(session, handlers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /* sched_switch event at a minimum needs to exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (!perf_evlist__find_tracepoint_by_name(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) "sched:sched_switch")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (sched->show_migrations &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) perf_session__set_tracepoints_handlers(session, migrate_handlers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) /* pre-allocate struct for per-CPU idle stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) sched->max_cpu = session->header.env.nr_cpus_online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (sched->max_cpu == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) sched->max_cpu = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) if (init_idle_threads(sched->max_cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) /* summary_only implies summary option, but don't overwrite summary if set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) if (sched->summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) sched->summary = sched->summary_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (!sched->summary_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) timehist_header(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) err = perf_session__process_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (err) {
		pr_err("Failed to process events, error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) sched->nr_events = evlist->stats.nr_events[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) sched->nr_lost_events = evlist->stats.total_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (sched->summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) timehist_print_summary(sched, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) free_idle_threads();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) perf_session__delete(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
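/*
 * Report data-quality problems seen while processing: unordered
 * timestamps, lost event chunks, and apparent context-switch bookkeeping
 * bugs (which may themselves be a consequence of lost events).
 */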
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) static void print_bad_events(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) sched->nr_unordered_timestamps, sched->nr_timestamps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (sched->nr_lost_events && sched->nr_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) sched->nr_context_switch_bugs, sched->nr_timestamps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) if (sched->nr_lost_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) printf(" (due to lost events?)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114)
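/*
 * Insert @data into the comm-sorted rbtree at @root. If a node with the
 * same comm already exists, fold @data into it (summing runtimes, atom
 * counts and latencies, and keeping the larger max latency together with
 * its start/end window), then free @data instead of inserting it.
 */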
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) struct work_atoms *this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) const char *comm = thread__comm_str(data->thread), *this_comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) bool leftmost = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) this = container_of(*new, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) this_comm = thread__comm_str(this->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) cmp = strcmp(comm, this_comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) if (cmp > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) } else if (cmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) leftmost = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) this->num_merged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) this->total_runtime += data->total_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) this->nb_atoms += data->nb_atoms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) this->total_lat += data->total_lat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) list_splice(&data->work_list, &this->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) if (this->max_lat < data->max_lat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) this->max_lat = data->max_lat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) this->max_lat_start = data->max_lat_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) this->max_lat_end = data->max_lat_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) zfree(&data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) data->num_merged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) rb_link_node(&data->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) rb_insert_color_cached(&data->node, root, leftmost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) static void perf_sched__merge_lat(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) struct work_atoms *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (sched->skip_merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) while ((node = rb_first_cached(&sched->atom_root))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) rb_erase_cached(node, &sched->atom_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) data = rb_entry(node, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) __merge_work_atoms(&sched->merged_atom_root, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) static int perf_sched__lat(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) setup_pager();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (perf_sched__read_events(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) perf_sched__merge_lat(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) perf_sched__sort_lat(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) next = rb_first_cached(&sched->sorted_atom_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) struct work_atoms *work_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) work_list = rb_entry(next, struct work_atoms, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) output_lat_thread(sched, work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) next = rb_next(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) thread__zput(work_list->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) printf(" -----------------------------------------------------------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) printf(" ---------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) print_bad_events(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) static int setup_map_cpus(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (sched->map.comp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) if (!sched->map.comp_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) if (!sched->map.cpus_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) map = perf_cpu_map__new(sched->map.cpus_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) if (!map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) sched->map.cpus = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) static int setup_color_pids(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) struct perf_thread_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (!sched->map.color_pids_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) map = thread_map__new_by_tid_str(sched->map.color_pids_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (!map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) sched->map.color_pids = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) static int setup_color_cpus(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (!sched->map.color_cpus_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) map = perf_cpu_map__new(sched->map.color_cpus_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) sched->map.color_cpus = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
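/*
 * 'perf sched map' entry point: set up the CPU display and highlight
 * maps, replay the recorded events through the map printer and finish
 * with the statistics on unordered or otherwise problematic events.
 */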
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) static int perf_sched__map(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (setup_map_cpus(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (setup_color_pids(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) if (setup_color_cpus(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) setup_pager();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) if (perf_sched__read_events(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) print_bad_events(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
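/*
 * 'perf sched replay' entry point: calibrate the run/sleep measurement
 * overhead, reconstruct the recorded tasks as real threads and re-run
 * their scheduling pattern sched->replay_repeat times.
 */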
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) static int perf_sched__replay(struct perf_sched *sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) calibrate_run_measurement_overhead(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) calibrate_sleep_measurement_overhead(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) test_calibrations(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (perf_sched__read_events(sched))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
	printf("nr_run_events:        %lu\n", sched->nr_run_events);
	printf("nr_sleep_events:      %lu\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %lu\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %lu\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %lu\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized:  %lu\n",
		       sched->nr_run_events_optimized);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) print_task_traces(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) add_cross_task_wakeups(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) create_tasks(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) printf("------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) for (i = 0; i < sched->replay_repeat; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) run_one_test(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
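/*
 * Split the --sort string on commas/spaces and register each key as a
 * sort dimension; the cmp_pid list used for looking up per-task atoms
 * is always keyed by pid.
 */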
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) static void setup_sorting(struct perf_sched *sched, const struct option *options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) const char * const usage_msg[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) {
	char *tmp, *tok, *str = strdup(sched->sort_order);

	/* strdup() may fail; bail out before handing NULL to strtok_r(). */
	if (!str)
		usage_with_options_msg(usage_msg, options,
				       "Not enough memory to set up --sort");

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) for (tok = strtok_r(str, ", ", &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) tok; tok = strtok_r(NULL, ", ", &tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) if (sort_dimension__add(tok, &sched->sort_list) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) usage_with_options_msg(usage_msg, options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) "Unknown --sort key: `%s'", tok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) sort_dimension__add("pid", &sched->cmp_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) static bool schedstat_events_exposed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) {
	/*
	 * Probe the "sched:sched_stat_wait" tracepoint to determine
	 * whether the schedstat tracepoints are exposed.
	 */
	return !IS_ERR(trace_event__tp_format("sched", "sched_stat_wait"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
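/*
 * 'perf sched record' is a thin wrapper: synthesize a 'perf record'
 * command line with the scheduler tracepoints pre-selected and pass it,
 * together with the user's extra arguments, to cmd_record().
 */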
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) static int __cmd_record(int argc, const char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) unsigned int rec_argc, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) const char **rec_argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) const char * const record_args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) "record",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) "-a",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) "-R",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) "-m", "1024",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) "-c", "1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) "-e", "sched:sched_switch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) "-e", "sched:sched_stat_runtime",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) "-e", "sched:sched_process_fork",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) "-e", "sched:sched_wakeup_new",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) "-e", "sched:sched_migrate_task",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
	/*
	 * The tracepoints trace_sched_stat_{wait, sleep, iowait}
	 * are not exposed to user space if CONFIG_SCHEDSTATS is not set.
	 * To keep "perf sched record" from failing on such kernels,
	 * record the schedstat events only when they are actually exposed.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) const char * const schedstat_args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) "-e", "sched:sched_stat_wait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) "-e", "sched:sched_stat_sleep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) "-e", "sched:sched_stat_iowait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) unsigned int schedstat_argc = schedstat_events_exposed() ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) ARRAY_SIZE(schedstat_args) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) struct tep_event *waking_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) * +2 for either "-e", "sched:sched_wakeup" or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) * "-e", "sched:sched_waking"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) */
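	/*
	 * Worked example, assuming the schedstat tracepoints are exposed
	 * and no extra arguments were given (argc == 1):
	 * 17 fixed args + 2 wakeup args + 6 schedstat args + 0 = 25.
	 */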
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) rec_argv = calloc(rec_argc + 1, sizeof(char *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (rec_argv == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) for (i = 0; i < ARRAY_SIZE(record_args); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) rec_argv[i] = strdup(record_args[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
	rec_argv[i++] = strdup("-e");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) waking_event = trace_event__tp_format("sched", "sched_waking");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (!IS_ERR(waking_event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) rec_argv[i++] = strdup("sched:sched_waking");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) rec_argv[i++] = strdup("sched:sched_wakeup");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) for (j = 0; j < schedstat_argc; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) rec_argv[i++] = strdup(schedstat_args[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) for (j = 1; j < (unsigned int)argc; j++, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) rec_argv[i] = argv[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) BUG_ON(i != rec_argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) return cmd_record(i, rec_argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
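
/*
 * For illustration (hypothetical invocation): 'perf sched record sleep 1'
 * would expand to roughly
 *   perf record -a -R -m 1024 -c 1 -e sched:sched_switch ...
 *       -e sched:sched_waking [schedstat events] sleep 1
 */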
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
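/*
 * Top-level 'perf sched' dispatcher: parse the sub-command, wire up the
 * matching tracepoint handler and invoke the mode-specific entry point.
 */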
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) int cmd_sched(int argc, const char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) static const char default_sort_order[] = "avg, max, switch, runtime";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) struct perf_sched sched = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) .tool = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) .sample = perf_sched__process_tracepoint_sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) .comm = perf_sched__process_comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) .namespaces = perf_event__process_namespaces,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) .lost = perf_event__process_lost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) .fork = perf_sched__process_fork_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) .ordered_events = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) .sort_list = LIST_HEAD_INIT(sched.sort_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) .sort_order = default_sort_order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) .replay_repeat = 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) .profile_cpu = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) .next_shortname1 = 'A',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) .next_shortname2 = '0',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) .skip_merge = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) .show_callchain = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) .max_stack = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) const struct option sched_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) OPT_STRING('i', "input", &input_name, "file",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) "input file name"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) OPT_INCR('v', "verbose", &verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) "be more verbose (show symbol address, etc)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) "dump raw trace in ASCII"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) OPT_END()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) const struct option latency_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) "sort by key(s): runtime, switch, avg, max"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) OPT_INTEGER('C', "CPU", &sched.profile_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) "CPU to profile on"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) OPT_BOOLEAN('p', "pids", &sched.skip_merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) "latency stats per pid instead of per comm"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) OPT_PARENT(sched_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) const struct option replay_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) "repeat the workload replay N times (-1: infinite)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) OPT_PARENT(sched_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) const struct option map_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) OPT_BOOLEAN(0, "compact", &sched.map.comp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) "map output in compact mode"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) "highlight given pids in map"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) "highlight given CPUs in map"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) "display given CPUs in map"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) OPT_PARENT(sched_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) const struct option timehist_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) "file", "vmlinux pathname"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) "file", "kallsyms pathname"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) "Display call chains if present (default on)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		    "Maximum number of functions to display in a backtrace."),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only a summary of scheduling events with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all scheduling events and a summary with statistics"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) OPT_STRING(0, "time", &sched.time_str, "str",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) "Time span for analysis (start,stop)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) "analyze events only for given process id(s)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) "analyze events only for given thread id(s)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) OPT_PARENT(sched_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) const char * const latency_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) "perf sched latency [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) const char * const replay_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) "perf sched replay [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) const char * const map_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) "perf sched map [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) const char * const timehist_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) "perf sched timehist [<options>]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) const char *const sched_subcommands[] = { "record", "latency", "map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) "replay", "script",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) "timehist", NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) const char *sched_usage[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) };
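	/* Per-mode tracepoint dispatch tables. */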
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) struct trace_sched_handler lat_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) .wakeup_event = latency_wakeup_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) .switch_event = latency_switch_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) .runtime_event = latency_runtime_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) .migrate_task_event = latency_migrate_task_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) struct trace_sched_handler map_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) .switch_event = map_switch_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) struct trace_sched_handler replay_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) .wakeup_event = replay_wakeup_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) .switch_event = replay_switch_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) .fork_event = replay_fork_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) sched.curr_pid[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
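	/* Stop at the first non-option so the sub-command keeps its own argv. */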
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (!argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) usage_with_options(sched_usage, sched_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) * Aliased to 'perf script' for now:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (!strcmp(argv[0], "script"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) return cmd_script(argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) if (!strncmp(argv[0], "rec", 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return __cmd_record(argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) } else if (!strncmp(argv[0], "lat", 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) sched.tp_handler = &lat_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) if (argc > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) argc = parse_options(argc, argv, latency_options, latency_usage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) if (argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) usage_with_options(latency_usage, latency_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) setup_sorting(&sched, latency_options, latency_usage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) return perf_sched__lat(&sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) } else if (!strcmp(argv[0], "map")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) if (argc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) argc = parse_options(argc, argv, map_options, map_usage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) usage_with_options(map_usage, map_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) sched.tp_handler = &map_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) setup_sorting(&sched, latency_options, latency_usage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) return perf_sched__map(&sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) } else if (!strncmp(argv[0], "rep", 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) sched.tp_handler = &replay_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) if (argc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) argc = parse_options(argc, argv, replay_options, replay_usage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) usage_with_options(replay_usage, replay_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) return perf_sched__replay(&sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) } else if (!strcmp(argv[0], "timehist")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (argc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) argc = parse_options(argc, argv, timehist_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) timehist_usage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) usage_with_options(timehist_usage, timehist_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) if ((sched.show_wakeups || sched.show_next) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) sched.summary_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) parse_options_usage(timehist_usage, timehist_options, "s", true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) if (sched.show_wakeups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) parse_options_usage(NULL, timehist_options, "w", true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) if (sched.show_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) parse_options_usage(NULL, timehist_options, "n", true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) return perf_sched__timehist(&sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) usage_with_options(sched_usage, sched_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }