Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
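
/*
 * Update helpers for the schedstats counters.  schedstat_enabled() tests
 * the sched_schedstats static key (toggled at runtime via the schedstats=
 * boot option or /proc/sys/kernel/sched_schedstats).  The double-underscore
 * variants update unconditionally and are for callers that have already
 * checked schedstat_enabled(); the plain variants embed the check.
 */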
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
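
/*
 * Usage sketch (illustrative, not part of this header): a typical caller
 * such as do_sched_yield() bumps a per-rq counter with the guarded
 * variant, e.g.
 *
 *	schedstat_inc(rq->yld_count);
 *
 * which expands to nothing at all unless the static key is enabled.
 */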

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!sleep) {
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;
	} else {
		/*
		 * When a task sleeps, schedule() dequeues it before
		 * switching to the next one. Merge the clearing of
		 * TSK_RUNNING and TSK_ONCPU to save an unnecessary
		 * psi_task_change() call in psi_sched_switch().
		 */
		clear |= TSK_ONCPU;

		if (p->in_iowait)
			set |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}
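
/*
 * Call-site sketch (based on kernel/sched/core.c in this series, shown
 * here only for illustration): enqueue_task() and dequeue_task() pass
 * the wakeup/sleep information down, roughly as
 *
 *	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
 *	...
 *	psi_dequeue(p, flags & DEQUEUE_SLEEP);
 *
 * so PSI can tell a real runnable-state change from a migration requeue.
 */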

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

static inline void psi_task_tick(struct rq *rq)
{
	if (static_branch_likely(&psi_disabled))
		return;

	if (unlikely(rq->curr->in_memstall))
		psi_memstall_tick(rq->curr, cpu_of(rq));
}
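
/*
 * Note: psi_task_tick() is expected to be driven from the periodic
 * scheduler tick (scheduler_tick() on this kernel series) so that long
 * memory stalls keep being accounted while a task stays on the CPU.
 */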
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; taking the delta on each CPU annuls the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process stops being the actively running process,
 * typically involuntarily because its time slice expired (this may also
 * be called when switching to the idle task).  Now we can calculate how
 * long we ran.  Also, if the process is still in the TASK_RUNNING state,
 * call sched_info_queued() to mark that it has now again started waiting
 * on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because their
 * time slice expired.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (sched_info_on())
		__sched_info_switch(rq, prev, next);
}
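
/*
 * Call-site sketch (illustrative): the context-switch path invokes this
 * once per switch, with prev != next, e.g. from prepare_task_switch():
 *
 *	sched_info_switch(rq, prev, next);
 */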

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */