/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->success = 1; /* rudiment, kill when possible */
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
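
/*
 * For example, with both tracepoints above enabled, a remote wakeup typically
 * shows up as sched_waking on the waking CPU followed by sched_wakeup once the
 * task is actually marked runnable, which may happen on the target CPU.
 */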

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * as the left-shift count to get the correct task->state mapping.
	 */
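	/*
	 * For example (assuming the usual TASK_REPORT bit layout),
	 * TASK_UNINTERRUPTIBLE (0x2) indexes to 2, so 1 << (2 - 1) == 0x2,
	 * which the sched_switch TP_printk() below renders as "D".
	 */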
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
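
/*
 * Example of a rendered sched_switch line (values are illustrative only):
 *   prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S ==> next_comm=swapper/1 next_pid=0 next_prio=120
 * A task that was preempted rather than blocked is reported as prev_state=R+.
 */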

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
		__field(	int,	running			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
		__entry->running = (p->state == TASK_RUNNING);
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d running=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu,
		  __entry->running)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);


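/*
 * When CONFIG_SCHEDSTATS is disabled, the _SCHEDSTAT variants below resolve
 * to the *_NOP versions, so the corresponding trace_sched_stat_*() calls
 * compile down to empty stubs.
 */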
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field(	pid_t,	pid		)
		__field(	void*,	caller		)
		__field(	bool,	io_wait		)
	),

	TP_fast_assign(
		__entry->pid = tsk->pid;
		__entry->caller = (void *)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	runtime			)
		__field(	u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid		)
		__field(	int,	oldprio		)
		__field(	int,	newprio		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field(	pid_t,	pid		)
		__field(	pid_t,	tgid		)
		__field(	pid_t,	ngid		)
		__field(	int,	src_cpu		)
		__field(	int,	src_nid		)
		__field(	int,	dst_cpu		)
		__field(	int,	dst_nid		)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);
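
/*
 * A minimal sketch of watching for NUMA bouncing from user space (the
 * tracefs mount point may differ on a given system):
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/sched_move_numa/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */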

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field(	pid_t,	src_pid		)
		__field(	pid_t,	src_tgid	)
		__field(	pid_t,	src_ngid	)
		__field(	int,	src_cpu		)
		__field(	int,	src_nid		)
		__field(	pid_t,	dst_pid		)
		__field(	pid_t,	dst_tgid	)
		__field(	pid_t,	dst_ngid	)
		__field(	int,	dst_cpu		)
		__field(	int,	dst_nid		)
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);


/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
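
/*
 * A minimal sketch of how a test/debug module might attach to one of these
 * bare tracepoints; the probe takes the tracepoint prototype preceded by a
 * void *data argument (my_pelt_cfs_probe is a hypothetical name):
 *
 *	static void my_pelt_cfs_probe(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		...
 *	}
 *
 *	register_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 *	...
 *	unregister_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 */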
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>