Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

#include <trace/hooks/sched.h>

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
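/*
 * Note: sched_rr_timeslice is kept in jiffies, while the sysctl knob
 * (/proc/sys/kernel/sched_rr_timeslice_ms) is in milliseconds. For
 * example, assuming the default RR_TIMESLICE of 100 * HZ / 1000 ticks
 * and HZ=250, that is 25 jiffies, so the sysctl reads
 * (1000 / 250) * 25 = 100 ms.
 */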
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

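/*
 * Replenishment timer for an rt_bandwidth pool: every rt_period it calls
 * do_sched_rt_period_timer() to refill runtime and unthrottle rt_rqs,
 * re-arming itself until a period completes with nothing left to do.
 */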
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

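/*
 * One-time setup of an rt_bandwidth pool: record the period/runtime pair
 * and prepare (but do not yet start) the replenishment timer.
 */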
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

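/*
 * Initialize an rt_rq: an empty priority array with the MAX_RT_PRIO
 * delimiter bit set, no throttling, and zero local runtime (the actual
 * budget is assigned later by the bandwidth code).
 */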
void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

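/* An entity is a task iff it owns no group runqueue (my_q == NULL). */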
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

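/*
 * Tear down a group's RT state: cancel the period timer and free the
 * per-CPU rt_rq/rt_se arrays. Also safe on partially constructed groups,
 * since alloc_rt_sched_group() can fail midway.
 */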
void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

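/*
 * Wire up one CPU's slice of a task group. The root task group passes
 * rt_se == NULL and only gets the rt_rq linkage; child groups also hook
 * their entity into the parent's group runqueue.
 */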
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

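/*
 * Allocate and initialize the per-CPU rt_rq/rt_se pairs for a new task
 * group. New groups start with zero runtime, so their tasks cannot run
 * until bandwidth is granted explicitly. Returns 1 on success, 0 on
 * allocation failure.
 */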
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

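/*
 * An rq counts as "RT overloaded" when it has more than one RT task
 * queued and at least one of them can migrate; the flag lives in the
 * root domain so that other CPUs know there may be work to pull.
 */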
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

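/*
 * Pushable tasks are queued RT tasks with nr_cpus_allowed > 1, kept in
 * a priority-ordered plist. The del/init/add sequence below re-sorts
 * the node in case the task's priority changed.
 */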
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
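/*
 * Illustration (capacity numbers are hypothetical): on an asymmetric
 * system where a little CPU has capacity_orig 430 and a big CPU 1024,
 * a task with uclamp_min = 512 fits only the big CPU, unless its
 * uclamp_max is also below 430, in which case the little CPU passes
 * the min(min_cap, max_cap) check above as well.
 */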
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

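/*
 * Throttle/unthrottle hooks for group runqueues: enqueueing puts the
 * group's entity back on its parent (or re-attaches the root rt_rq) and
 * reschedules if the unthrottled queue now beats the running task's
 * priority; dequeueing is the inverse.
 */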
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

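/*
 * True while RT runtime accounting is still meaningful: the period timer
 * is active or budget remains. The deadline class consults this to
 * decide whether DL execution should also be charged to rt_time.
 */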
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
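/*
 * Worked example with hypothetical numbers: on a 4-CPU root domain, a
 * neighbour with 500us of unused budget (rt_runtime - rt_time) donates
 * 500us / 4 = 125us per pass, and the loop below stops early once our
 * rt_runtime has grown to the full period.
 */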
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static void __disable_runtime(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct root_domain *rd = rq->rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	rt_rq_iter_t iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct rt_rq *rt_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (unlikely(!scheduler_running))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	for_each_rt_rq(rt_rq, iter, rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		s64 want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		raw_spin_lock(&rt_b->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		 * Either we're all inf and nobody needs to borrow, or we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		 * already disabled and thus have nothing to do, or we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		 * exactly the right amount of runtime to take out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		if (rt_rq->rt_runtime == RUNTIME_INF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				rt_rq->rt_runtime == rt_b->rt_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			goto balanced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		 * Calculate the difference between what we started out with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		 * and what we current have, that's the amount of runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		 * we lend and now have to reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 * Greedy reclaim, take back as much as we can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		for_each_cpu(i, rd->span) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			s64 diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			 * Can't reclaim from ourselves or disabled runqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			raw_spin_lock(&iter->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			if (want > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				diff = min_t(s64, iter->rt_runtime, want);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				iter->rt_runtime -= diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				want -= diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				iter->rt_runtime -= want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				want -= want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			raw_spin_unlock(&iter->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			if (!want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		 * We cannot be left wanting - that would mean some runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		 * leaked out of the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		BUG_ON(want);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) balanced:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 * Disable all the borrowing logic by pretending we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 * infinite runtime - in which case borrowing doesn't make sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		rt_rq->rt_runtime = RUNTIME_INF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		rt_rq->rt_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		raw_spin_unlock(&rt_b->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		/* Make rt_rq available for pick_next_task() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		sched_rt_rq_enqueue(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
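/*
 * Counterpart of __disable_runtime() above: when a runqueue comes back
 * online (rq_online_rt()), restore each rt_rq's full bandwidth share
 * and clear throttling state, so runtime borrowing starts afresh.
 */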
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) static void __enable_runtime(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	rt_rq_iter_t iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct rt_rq *rt_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (unlikely(!scheduler_running))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * Reset each runqueue's bandwidth settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	for_each_rt_rq(rt_rq, iter, rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		raw_spin_lock(&rt_b->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		rt_rq->rt_runtime = rt_b->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		rt_rq->rt_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		rt_rq->rt_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		raw_spin_unlock(&rt_b->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
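/*
 * Called with rt_rq->rt_runtime_lock held once rt_rq has overrun its
 * local budget. If the RT_RUNTIME_SHARE feature is enabled, the lock is
 * dropped while do_balance_runtime() tries to borrow runtime from the
 * other CPUs in the root domain.
 */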
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static void balance_runtime(struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (!sched_feat(RT_RUNTIME_SHARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		do_balance_runtime(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) #else /* !CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static inline void balance_runtime(struct rt_rq *rt_rq) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
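/*
 * Periodic replenishment, driven by the rt_period hrtimer: for each CPU
 * in the bandwidth's span, pay back up to 'overrun' periods worth of
 * runtime from rt_time and unthrottle (re-enqueue) rt_rqs that fit
 * their budget again. Returns 1 once everything is idle, allowing the
 * timer to stop rearming itself.
 */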
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	int i, idle = 1, throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	const struct cpumask *span;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	span = sched_rt_period_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	 * FIXME: isolated CPUs should really leave the root task group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 * whether they are isolcpus or were isolated via cpusets, lest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 * the timer run on a CPU which does not service all runqueues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 * potentially leaving other CPUs indefinitely throttled.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	 * isolation is really required, the user will turn the throttle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	 * off to kill the perturbations it causes anyway.  Meanwhile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	 * this maintains functionality for boot and/or troubleshooting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (rt_b == &root_task_group.rt_bandwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		span = cpu_online_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	for_each_cpu(i, span) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		int enqueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		struct rq *rq = rq_of_rt_rq(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		int skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		 * When span == cpu_online_mask, taking each rq->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		 * can be time-consuming. Try to avoid it when possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			rt_rq->rt_runtime = rt_b->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		raw_spin_lock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		if (rt_rq->rt_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			u64 runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			if (rt_rq->rt_throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				balance_runtime(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			runtime = rt_rq->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 				rt_rq->rt_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				enqueue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 				 * When we're idle and a woken (rt) task is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 				 * throttled, check_preempt_curr() will set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				 * skip_update and the time between the wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 				 * and this unthrottle will get accounted as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 				 * 'runtime'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 					rq_clock_cancel_skipupdate(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 				idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		} else if (rt_rq->rt_nr_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			if (!rt_rq_throttled(rt_rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				enqueue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		if (rt_rq->rt_throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			throttled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		if (enqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			sched_rt_rq_enqueue(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		raw_spin_unlock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	return idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) static inline int rt_se_prio(struct sched_rt_entity *rt_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		return rt_rq->highest_prio.curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return rt_task_of(rt_se)->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
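/*
 * Returns 1 when rt_rq is over budget and throttled (dequeued),
 * 0 otherwise. Called from update_curr_rt() with
 * rt_rq->rt_runtime_lock held, after rt_time has been charged.
 */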
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	u64 runtime = sched_rt_runtime(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (rt_rq->rt_throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		return rt_rq_throttled(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (runtime >= sched_rt_period(rt_rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	balance_runtime(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	runtime = sched_rt_runtime(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	if (runtime == RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (rt_rq->rt_time > runtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		 * Don't actually throttle groups that have no runtime assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		 * but accrue some time due to boosting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		if (likely(rt_b->rt_runtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			rt_rq->rt_throttled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			printk_deferred_once("sched: RT throttling activated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			trace_android_vh_dump_throttled_rt_tasks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				raw_smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				rq_clock(rq_of_rt_rq(rt_rq)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				sched_rt_period(rt_rq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				hrtimer_get_expires_ns(&rt_b->rt_period_timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			 * In case we accrued time anyway, discard it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			 * replenishment is pointless, since it would replenish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			 * us with exactly 0 ns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			rt_rq->rt_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (rt_rq_throttled(rt_rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			sched_rt_rq_dequeue(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
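
/*
 * Worked example, assuming the usual defaults: with
 * /proc/sys/kernel/sched_rt_period_us = 1000000 and
 * /proc/sys/kernel/sched_rt_runtime_us = 950000, the RT classes may
 * consume at most 950 ms of every 1 s period; once rt_time exceeds that
 * budget the rt_rq is throttled ("sched: RT throttling activated")
 * until the period timer replenishes it, leaving ~50 ms per second for
 * other classes. Writing -1 (RUNTIME_INF) disables throttling:
 *
 *   sysctl -w kernel.sched_rt_runtime_us=-1
 */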
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * Update the current task's runtime statistics. Skip current tasks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * are not in our scheduling class.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void update_curr_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct task_struct *curr = rq->curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct sched_rt_entity *rt_se = &curr->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	u64 delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (curr->sched_class != &rt_sched_class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	now = rq_clock_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	delta_exec = now - curr->se.exec_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (unlikely((s64)delta_exec <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	schedstat_set(curr->se.statistics.exec_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		      max(curr->se.statistics.exec_max, delta_exec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	curr->se.sum_exec_runtime += delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	account_group_exec_runtime(curr, delta_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	curr->se.exec_start = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	cgroup_account_cputime(curr, delta_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	trace_android_vh_sched_stat_runtime_rt(curr, delta_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (!rt_bandwidth_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	for_each_sched_rt_entity(rt_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		int exceeded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			rt_rq->rt_time += delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			exceeded = sched_rt_runtime_exceeded(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			if (exceeded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			if (exceeded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
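/*
 * dequeue_top_rt_rq()/enqueue_top_rt_rq() keep rq->nr_running in sync
 * with the root rt_rq: when the whole RT hierarchy is throttled its
 * tasks must stop counting as runnable, and count again on unthrottle.
 */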
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) dequeue_top_rt_rq(struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct rq *rq = rq_of_rt_rq(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	BUG_ON(&rq->rt != rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (!rt_rq->rt_queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	BUG_ON(!rq->nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	sub_nr_running(rq, rt_rq->rt_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	rt_rq->rt_queued = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) enqueue_top_rt_rq(struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct rq *rq = rq_of_rt_rq(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	BUG_ON(&rq->rt != rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (rt_rq->rt_queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (rt_rq_throttled(rt_rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	if (rt_rq->rt_nr_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		add_nr_running(rq, rt_rq->rt_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		rt_rq->rt_queued = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	cpufreq_update_util(rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #if defined CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct rq *rq = rq_of_rt_rq(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	 * Change rq's cpupri only if rt_rq is the top queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (&rq->rt != rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (rq->online && prio < prev_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct rq *rq = rq_of_rt_rq(rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	 * Change rq's cpupri only if rt_rq is the top queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (&rq->rt != rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #else /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
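/*
 * Track the highest (numerically lowest) priority queued on each rt_rq;
 * on SMP this is propagated into the root domain's cpupri structure so
 * push/pull and find_lowest_rq() can locate CPUs running lower-priority
 * work.
 */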
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) inc_rt_prio(struct rt_rq *rt_rq, int prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	int prev_prio = rt_rq->highest_prio.curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (prio < prev_prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		rt_rq->highest_prio.curr = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) dec_rt_prio(struct rt_rq *rt_rq, int prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	int prev_prio = rt_rq->highest_prio.curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (rt_rq->rt_nr_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		WARN_ON(prio < prev_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		 * This may have been our highest task, and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		 * we may have some recomputation to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		if (prio == prev_prio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			struct rt_prio_array *array = &rt_rq->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			rt_rq->highest_prio.curr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				sched_find_first_bit(array->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
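/*
 * rt_nr_boosted counts entities that are RT only through priority
 * inheritance; a group with boosted members is exempt from throttling
 * (see rt_rq_throttled()), so a throttled group cannot hold a boosted
 * lock owner captive.
 */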
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (rt_se_boosted(rt_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		rt_rq->rt_nr_boosted++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (rt_rq->tg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (rt_se_boosted(rt_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		rt_rq->rt_nr_boosted--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #else /* CONFIG_RT_GROUP_SCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	start_rt_bandwidth(&def_rt_bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #endif /* CONFIG_RT_GROUP_SCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
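/*
 * An entity contributes either the task count of the group it
 * represents, or just itself; rr_nr_running additionally tracks how
 * many of those tasks are SCHED_RR, i.e. subject to timeslicing.
 */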
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct rt_rq *group_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (group_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return group_rq->rt_nr_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct rt_rq *group_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (group_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		return group_rq->rr_nr_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	tsk = rt_task_of(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	return (tsk->policy == SCHED_RR) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	int prio = rt_se_prio(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	WARN_ON(!rt_prio(prio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	inc_rt_prio(rt_rq, prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	inc_rt_migration(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	inc_rt_group(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	WARN_ON(!rt_rq->rt_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	dec_rt_migration(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	dec_rt_group(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  * Change rt_se->run_list location unless SAVE && !MOVE: SAVE without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  * MOVE means a temporary dequeue that will be restored in place, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  * the queue position must be kept. Assumes ENQUEUE/DEQUEUE flags match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static inline bool move_entity(unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	list_del_init(&rt_se->run_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (list_empty(array->queue + rt_se_prio(rt_se)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	rt_se->on_list = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	struct rt_prio_array *array = &rt_rq->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	struct rt_rq *group_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	 * Don't enqueue the group if it's throttled, or when empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	 * The latter is a consequence of the former when a child group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	 * gets throttled and the current group doesn't have any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	 * active members.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (rt_se->on_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			__delist_rt_entity(rt_se, array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (move_entity(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		WARN_ON_ONCE(rt_se->on_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		if (flags & ENQUEUE_HEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			list_add(&rt_se->run_list, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			list_add_tail(&rt_se->run_list, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		__set_bit(rt_se_prio(rt_se), array->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		rt_se->on_list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	rt_se->on_rq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	inc_rt_tasks(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	struct rt_prio_array *array = &rt_rq->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (move_entity(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		WARN_ON_ONCE(!rt_se->on_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		__delist_rt_entity(rt_se, array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	rt_se->on_rq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	dec_rt_tasks(rt_se, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * Because the prio of an upper entry depends on the lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  * entries, we must remove entries top-down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct sched_rt_entity *back = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	for_each_sched_rt_entity(rt_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		rt_se->back = back;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		back = rt_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	dequeue_top_rt_rq(rt_rq_of_se(back));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		if (on_rt_rq(rt_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			__dequeue_rt_entity(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	struct rq *rq = rq_of_rt_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	dequeue_rt_stack(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	for_each_sched_rt_entity(rt_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		__enqueue_rt_entity(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	enqueue_top_rt_rq(&rq->rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	struct rq *rq = rq_of_rt_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	dequeue_rt_stack(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	for_each_sched_rt_entity(rt_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		if (rt_rq && rt_rq->rt_nr_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			__enqueue_rt_entity(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	enqueue_top_rt_rq(&rq->rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 					bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	 * and force it to run, likely only briefly, after the RT wakee is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	 * done. So, only honor RT sync wakeups from RT wakers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return sync && task_has_rt_policy(rq->curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		p->prio <= rq->rt.highest_prio.next &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		rq->rt.rt_nr_running <= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 					bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * Adding/removing a task to/from a priority array:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	struct sched_rt_entity *rt_se = &p->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (flags & ENQUEUE_WAKEUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		rt_se->timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	enqueue_rt_entity(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	    !should_honor_rt_sync(rq, p, sync))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		enqueue_pushable_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct sched_rt_entity *rt_se = &p->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	update_curr_rt(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	dequeue_rt_entity(rt_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	dequeue_pushable_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * Put the task at the head or the tail of the run list without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  * overhead of a dequeue followed by an enqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (on_rt_rq(rt_se)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		struct rt_prio_array *array = &rt_rq->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		if (head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			list_move(&rt_se->run_list, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			list_move_tail(&rt_se->run_list, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	struct sched_rt_entity *rt_se = &p->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	struct rt_rq *rt_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	for_each_sched_rt_entity(rt_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		rt_rq = rt_rq_of_se(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		requeue_rt_entity(rt_rq, rt_se, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
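/*
 * sched_yield() for RT: requeue current at the tail of its priority
 * queue so that equal-priority tasks get a turn.
 */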
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void yield_task_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	requeue_task_rt(rq, rq->curr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static int find_lowest_rq(struct task_struct *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) #ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  * Return whether the task on the given cpu is currently non-preemptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  * while handling a potentially long softint, or if the task is likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * to block preemptions soon because it is a ksoftirq thread that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * handling slow softints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) task_may_not_preempt(struct task_struct *task, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	__u32 softirqs = per_cpu(active_softirqs, cpu) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			 __IRQ_STAT(cpu, __softirq_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	return ((softirqs & LONG_SOFTIRQ_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		(task == cpu_ksoftirqd ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) EXPORT_SYMBOL_GPL(task_may_not_preempt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) #endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	struct task_struct *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	struct rq *this_cpu_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	bool test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	int target_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	bool may_not_preempt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	bool sync = !!(flags & WF_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	int this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 					flags, &target_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	if (target_cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		return target_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	/* For anything but wakeups, just return the task_cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	curr = READ_ONCE(rq->curr); /* unlocked access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	this_cpu_rq = cpu_rq(this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	 * If the current task on @p's runqueue is a softirq task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	 * it may run without preemption for a time that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	 * ill-suited for a waiting RT task. Therefore, try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	 * wake this RT task on another runqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	 * Also, if the current task on @p's runqueue is an RT task, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	 * try to see if we can wake this RT task up on another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	 * runqueue. Otherwise simply start this RT task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	 * on its current runqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	 * We want to avoid overloading runqueues. If the woken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	 * task is a higher priority, then it will stay on this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	 * and the lower prio task should be moved to another CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	 * Even though this will probably make the lower prio task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	 * lose its cache, we do not want to bounce a higher task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	 * around just because it gave up its CPU, perhaps for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 * lock?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	 * For equal prio tasks, we just let the scheduler sort it out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 * Otherwise, just let it ride on the affined RQ and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 * post-schedule router will push the preempted task away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 * This test is optimistic; if we get it wrong the load-balancer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * will have to sort it out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 * We take into account the capacity of the CPU to ensure it fits the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	 * requirement of the task - which is only important on heterogeneous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 * systems like big.LITTLE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	may_not_preempt = task_may_not_preempt(curr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	test = (curr && (may_not_preempt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			 (unlikely(rt_task(curr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			  (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		test |= rockchip_perf_misfit_rt(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	 * Respect the sync flag as long as the task can run on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		cpu = this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (test || !rt_task_fits_capacity(p, cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		int target = find_lowest_rq(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		 * Bail out if we were forcing a migration to find a better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		 * fitting CPU but our search failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		 * If cpu is non-preemptible, prefer remote cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		 * even if it's running a higher-prio task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		 * Otherwise, don't bother moving it if the destination CPU is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		 * not running a lower-priority task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		if (target != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		    (may_not_preempt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
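
/*
 * Worked example of the policy above (lower value = higher priority):
 * a FIFO prio-10 task waking on a CPU whose current task is FIFO prio 5
 * and pinned there makes "test" true, so find_lowest_rq() is consulted;
 * the wakee is only moved if the chosen CPU's rt.highest_prio.curr is
 * numerically greater than 10, or if the origin CPU is stuck in a long
 * softirq (may_not_preempt).
 */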
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	 * Current can't be migrated, so rescheduling is useless;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	 * let's hope p can move out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	if (rq->curr->nr_cpus_allowed == 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	 * p is migratable, so let's not schedule it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	 * see if it is pushed or pulled somewhere else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	if (p->nr_cpus_allowed != 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	    cpupri_find(&rq->rd->cpupri, p, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * There appear to be other CPUs that can accept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 * the current task but none can run 'p', so let's reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 * to try and push the current task away:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	requeue_task_rt(rq, p, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		 * This is OK, because current is on_cpu, which avoids it being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		 * picked for load-balance; preemption/IRQs are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		 * disabled, avoiding further scheduler activity on it, and we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		 * not yet started the picking loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		rq_unpin_lock(rq, rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		trace_android_rvh_sched_balance_rt(rq, p, &done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		if (!done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			pull_rt_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		rq_repin_lock(rq, rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * Preempt the current task with a newly woken task if needed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (p->prio < rq->curr->prio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	 * If:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	 * - the newly woken task is of equal priority to the current task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	 * - the newly woken task is non-migratable while current is migratable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	 * - current will be preempted on the next reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	 * we should check to see if current can readily move to a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	 * cpu.  If so, we will reschedule to allow the push logic to try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	 * to move current somewhere else, making room for our non-migratable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 * task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		check_preempt_equal_prio(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	p->se.exec_start = rq_clock_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	/* The running task is never eligible for pushing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	dequeue_pushable_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	 * If prev task was rt, put_prev_task() has already updated the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	 * utilization. We only care about the case where we start to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	 * schedule an RT task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	if (rq->curr->sched_class != &rt_sched_class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	rt_queue_push_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 						   struct rt_rq *rt_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	struct rt_prio_array *array = &rt_rq->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	struct sched_rt_entity *next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	struct list_head *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	idx = sched_find_first_bit(array->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	BUG_ON(idx >= MAX_RT_PRIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	queue = array->queue + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
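
/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * the O(1) pick above, reduced to a bitmap of non-empty priority
 * queues. ffs() stands in for sched_find_first_bit() and demo_queue[]
 * for array->queue[]; all demo_* names are invented for illustration.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <strings.h>		// ffs()
 *
 *	#define DEMO_NR_PRIO 32		// lower index = higher priority
 *
 *	struct demo_node { struct demo_node *next; };
 *
 *	static uint32_t demo_bitmap;	// bit i set => prio i queue non-empty
 *	static struct demo_node *demo_queue[DEMO_NR_PRIO];
 *
 *	static struct demo_node *demo_pick_next(void)
 *	{
 *		int idx = ffs((int)demo_bitmap);   // lowest set bit, 1-based
 *
 *		if (!idx)
 *			return NULL;		// nothing runnable
 *		return demo_queue[idx - 1];	// head of that prio's FIFO
 *	}
 */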
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static struct task_struct *_pick_next_task_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	struct sched_rt_entity *rt_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	struct rt_rq *rt_rq  = &rq->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		rt_se = pick_next_rt_entity(rq, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		BUG_ON(!rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		rt_rq = group_rt_rq(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	} while (rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	return rt_task_of(rt_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static struct task_struct *pick_next_task_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (!sched_rt_runnable(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	p = _pick_next_task_rt(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	set_next_task_rt(rq, p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	update_curr_rt(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	 * The previous task needs to be made eligible for pushing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	 * if it is still active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		enqueue_pushable_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /* Only try algorithms three times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) #define RT_MAX_TRIES 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (!task_running(rq, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	    cpumask_test_cpu(cpu, p->cpus_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  * Return the highest pushable task on the rq that is suitable to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)  * executed on the given CPU, or NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	struct plist_head *head = &rq->rt.pushable_tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (!has_pushable_tasks(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	plist_for_each_entry(p, head, pushable_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		if (pick_rt_task(rq, p, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int find_lowest_rq(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	struct sched_domain *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	int this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	int cpu      = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	/* Make sure the mask is initialized first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (unlikely(!lowest_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (task->nr_cpus_allowed == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		return -1; /* No other targets possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * If we're on an asymmetric system, ensure we consider the different capacities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 * of the CPUs when searching for the lowest_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 					  task, lowest_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 					  rt_task_fits_capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		ret = cpupri_find(&task_rq(task)->rd->cpupri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 				  task, lowest_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		return -1; /* No targets found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	cpu = task_cpu(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (IS_ENABLED(CONFIG_ROCKCHIP_PERFORMANCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		cpu = rockchip_perf_select_rt_cpu(cpu, lowest_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	 * At this point we have built a mask of CPUs representing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	 * lowest priority tasks in the system.  Now we want to elect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	 * the best one based on our affinity and topology.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	 * We prioritize the last CPU that the task executed on since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	 * it is most likely cache-hot in that location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	if (cpumask_test_cpu(cpu, lowest_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	 * Otherwise, we consult the sched_domains span maps to figure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	 * out which CPU is logically closest to our hot cache data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	for_each_domain(cpu, sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		if (sd->flags & SD_WAKE_AFFINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			int best_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 			 * "this_cpu" is cheaper to preempt than a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			 * remote processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			if (this_cpu != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 				return this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			best_cpu = cpumask_first_and(lowest_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 						     sched_domain_span(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			if (best_cpu < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 				rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 				return best_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * And finally, if there were no matches within the domains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * just give the caller *something* to work with from the compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	if (this_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		return this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	cpu = cpumask_any(lowest_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (cpu < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
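
/*
 * Editor's sketch (hypothetical, simplified userspace analogue of the
 * selection order above, ignoring the sched_domain walk): prefer the
 * task's previous CPU if it is in the lowest mask (likely cache-hot),
 * then the calling CPU (cheaper to preempt locally), then any
 * compatible CPU. All demo_* names are invented for illustration.
 *
 *	#include <stdint.h>
 *
 *	static int demo_pick_cpu(uint64_t lowest_mask, int prev_cpu,
 *				 int this_cpu)
 *	{
 *		int cpu;
 *
 *		if (prev_cpu >= 0 && (lowest_mask & (1ULL << prev_cpu)))
 *			return prev_cpu;	// likely cache-hot
 *		if (this_cpu >= 0 && (lowest_mask & (1ULL << this_cpu)))
 *			return this_cpu;	// cheapest to preempt
 *		for (cpu = 0; cpu < 64; cpu++)
 *			if (lowest_mask & (1ULL << cpu))
 *				return cpu;	// *something* compatible
 *		return -1;			// no candidate at all
 *	}
 */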
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /* Will lock the rq it finds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	struct rq *lowest_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		cpu = find_lowest_rq(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		if ((cpu == -1) || (cpu == rq->cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		lowest_rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			 * Target rq has tasks of equal or higher priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			 * retrying does not release any lock and is unlikely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			 * to yield a different result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			lowest_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		/* if the prio of this runqueue changed, try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		if (double_lock_balance(rq, lowest_rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			 * We had to unlock the run queue. In the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			 * meantime, the task could have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			 * migrated already or had its affinity changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			 * Also make sure that it wasn't scheduled on its rq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			if (unlikely(task_rq(task) != rq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 				     task_running(rq, task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 				     !rt_task(task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 				     !task_on_rq_queued(task))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 				double_unlock_balance(rq, lowest_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 				lowest_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		/* If this rq is still suitable, use it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		if (lowest_rq->rt.highest_prio.curr > task->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		/* try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		double_unlock_balance(rq, lowest_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		lowest_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	return lowest_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
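
/*
 * Editor's sketch (hypothetical pthread analogue, not kernel code) of
 * the pattern above: double_lock_balance() may have to drop the lock we
 * already hold in order to take both locks in a safe order, and whenever
 * that happens every fact derived beforehand must be revalidated. The
 * demo_* name is invented; the return value mirrors double_lock_balance().
 *
 *	#include <pthread.h>
 *
 *	// Returns 1 if 'held' was dropped on the way; the caller must
 *	// then recheck any state it cached while holding only 'held'.
 *	static int demo_double_lock(pthread_mutex_t *held,
 *				    pthread_mutex_t *want)
 *	{
 *		if (pthread_mutex_trylock(want) == 0)
 *			return 0;	// got it without dropping anything
 *
 *		// Taking 'want' outright could deadlock against a thread
 *		// locking in the opposite order: drop what we hold and
 *		// retake both in a fixed (address) order.
 *		pthread_mutex_unlock(held);
 *		if (want < held) {
 *			pthread_mutex_lock(want);
 *			pthread_mutex_lock(held);
 *		} else {
 *			pthread_mutex_lock(held);
 *			pthread_mutex_lock(want);
 *		}
 *		return 1;
 *	}
 */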
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static struct task_struct *pick_next_pushable_task(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	if (!has_pushable_tasks(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	p = plist_first_entry(&rq->rt.pushable_tasks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			      struct task_struct, pushable_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	BUG_ON(rq->cpu != task_cpu(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	BUG_ON(task_current(rq, p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	BUG_ON(p->nr_cpus_allowed <= 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	BUG_ON(!task_on_rq_queued(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	BUG_ON(!rt_task(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  * If the current CPU has more than one RT task, see if the non
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  * running task can migrate over to a CPU that is running a task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  * of lesser priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static int push_rt_task(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	struct task_struct *next_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	struct rq *lowest_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (!rq->rt.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	next_task = pick_next_pushable_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	if (!next_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	if (WARN_ON(next_task == rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	 * It's possible that the next_task slipped in with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	 * higher priority than current. If that's the case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	 * just reschedule current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	if (unlikely(next_task->prio < rq->curr->prio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	/* We might release rq lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	get_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	/* find_lock_lowest_rq locks the rq if found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	lowest_rq = find_lock_lowest_rq(next_task, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (!lowest_rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		 * find_lock_lowest_rq releases rq->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		 * so it is possible that next_task has migrated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		 * We need to make sure that the task is still on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		 * run-queue and is also still the next task eligible for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		 * pushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		task = pick_next_pushable_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		if (task == next_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			 * The task hasn't migrated, and is still the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			 * eligible task, but we failed to find a run-queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			 * to push it to.  Do not retry in this case, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			 * other CPUs will pull from us when ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		if (!task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			/* No more tasks, just exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		 * Something has shifted, try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		put_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		next_task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	deactivate_task(rq, next_task, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	set_task_cpu(next_task, lowest_rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	activate_task(lowest_rq, next_task, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	resched_curr(lowest_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	double_unlock_balance(rq, lowest_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	put_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static void push_rt_tasks(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	/* push_rt_task() will return true if it moved an RT task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	while (push_rt_task(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) #ifdef HAVE_RT_PUSH_IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)  * When a high priority task schedules out from a CPU and a lower priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  * task is scheduled in, a check is made to see if there are any RT tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  * on other CPUs that are waiting to run because a higher priority RT task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)  * is currently running on its CPU. In this case, the CPU with multiple RT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)  * up that may be able to run one of its non-running queued RT tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)  * All CPUs with overloaded RT tasks need to be notified as there is currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)  * no way to know which of these CPUs have the highest priority task waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)  * to run. Instead of trying to take a spinlock on each of these CPUs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)  * which has shown to cause large latency when done on machines with many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)  * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * RT tasks waiting to run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  * Just sending an IPI to each of the CPUs is also an issue, as on large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * count CPU machines, this can cause an IPI storm on a CPU, especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  * if it's the only CPU with multiple RT tasks queued, and a large number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * of CPUs scheduling a lower priority task at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * Each root domain has its own irq work function that can iterate over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * tasks must be checked whenever one or more CPUs are lowering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  * their priority, there's a single irq work iterator that will try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)  * push off RT tasks that are waiting to run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  * When a CPU schedules a lower priority task, it will kick off the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * irq work iterator that will jump to each CPU with overloaded RT tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  * As it only takes the first CPU that schedules a lower priority task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)  * to start the process, the rto_loop_start variable is atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)  * claimed (0 -> 1), and only the CPU that wins the claim takes the rto_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)  * This prevents high contention on the lock as the process handles all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)  * CPUs scheduling lower priority tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)  * All CPUs that are scheduling a lower priority task will increment the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)  * rto_loop_next variable. This will make sure that the irq work iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * priority task, even if the iterator is in the middle of a scan. Incrementing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  * the rto_loop_next will cause the iterator to perform another scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static int rto_next_cpu(struct root_domain *rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	int next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	 * rto_next_cpu() will simply return the first CPU found in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	 * the rto_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	 * If rto_next_cpu() is called while rto_cpu is a valid CPU, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	 * will return the next CPU found in the rto_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	 * If there are no more CPUs left in the rto_mask, then a check is made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	 * the rto_lock held, but any CPU may increment the rto_loop_next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	 * without any locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		/* When rto_cpu is -1 this acts like cpumask_first() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		rd->rto_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		if (cpu < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		rd->rto_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		 * ACQUIRE ensures we see the @rto_mask changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		 * made prior to the @next value observed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		 * Matches WMB in rt_set_overload().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		next = atomic_read_acquire(&rd->rto_loop_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		if (rd->rto_loop == next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		rd->rto_loop = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
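
/*
 * Editor's sketch (hypothetical C11/userspace reduction of the
 * start/next protocol described above, not kernel code): a generation
 * counter keeps an in-flight iterator scanning, while a 0/1 flag lets
 * only one starter run at a time. All demo_* names are invented; the
 * kernel's rto_loop_start/rto_loop_next fields play the same roles,
 * with the scan hopping between CPUs via irq work instead of inline.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static atomic_int demo_loop_start;	// 0 = no iterator running
 *	static atomic_int demo_loop_next;	// generation counter
 *
 *	static bool demo_start_trylock(void)
 *	{
 *		int expected = 0;
 *		// Only the caller that flips 0 -> 1 starts the iterator.
 *		return atomic_compare_exchange_strong_explicit(
 *			&demo_loop_start, &expected, 1,
 *			memory_order_acquire, memory_order_relaxed);
 *	}
 *
 *	static void demo_cpu_lowers_prio(void)
 *	{
 *		int seen;
 *
 *		// Every lowering CPU bumps the generation so an in-flight
 *		// iterator rescans instead of stopping early.
 *		atomic_fetch_add(&demo_loop_next, 1);
 *		if (!demo_start_trylock())
 *			return;			// an iterator already runs
 *		do {
 *			seen = atomic_load(&demo_loop_next);
 *			// ... visit every RT-overloaded CPU here ...
 *		} while (seen != atomic_load(&demo_loop_next));
 *		atomic_store_explicit(&demo_loop_start, 0,
 *				      memory_order_release);
 *	}
 */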
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static inline bool rto_start_trylock(atomic_t *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	return !atomic_cmpxchg_acquire(v, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) static inline void rto_start_unlock(atomic_t *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	atomic_set_release(v, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static void tell_cpu_to_push(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	int cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	/* Keep the loop going if the IPI is currently active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	atomic_inc(&rq->rd->rto_loop_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	/* Only one CPU can initiate a loop at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	raw_spin_lock(&rq->rd->rto_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	 * The rto_cpu is updated under the lock, if it has a valid CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	 * then the IPI is still running and will continue due to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	 * update to loop_next, and nothing needs to be done here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	 * Otherwise it is finishing up and an IPI needs to be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	if (rq->rd->rto_cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		cpu = rto_next_cpu(rq->rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	raw_spin_unlock(&rq->rd->rto_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	rto_start_unlock(&rq->rd->rto_loop_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	if (cpu >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		/* Make sure the rd does not get freed while pushing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		sched_get_rd(rq->rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) /* Called from hardirq context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) void rto_push_irq_work_func(struct irq_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	struct root_domain *rd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		container_of(work, struct root_domain, rto_push_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	rq = this_rq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	 * We do not need to grab the lock to check for has_pushable_tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	 * When it gets updated, a check is made if a push is possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	if (has_pushable_tasks(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		raw_spin_lock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		push_rt_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		raw_spin_unlock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	raw_spin_lock(&rd->rto_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	/* Pass the IPI to the next rt overloaded queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	cpu = rto_next_cpu(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	raw_spin_unlock(&rd->rto_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	if (cpu < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		sched_put_rd(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	/* Try the next RT overloaded CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	irq_work_queue_on(&rd->rto_push_work, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) #endif /* HAVE_RT_PUSH_IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) static void pull_rt_task(struct rq *this_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	int this_cpu = this_rq->cpu, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	bool resched = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	struct rq *src_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	int rt_overload_count = rt_overloaded(this_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	if (likely(!rt_overload_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	 * Match the barrier from rt_set_overload(); this guarantees that if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	 * see overloaded we must also see the rto_mask bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	/* If we are the only overloaded CPU do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	if (rt_overload_count == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) #ifdef HAVE_RT_PUSH_IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	if (sched_feat(RT_PUSH_IPI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		tell_cpu_to_push(this_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		if (this_cpu == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		src_rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		 * Don't bother taking the src_rq->lock if the next highest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		 * task is known to be lower-priority than our current task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		 * This may look racy, but if this value is about to go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		 * logically higher, the src_rq will push this task away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		 * And if it's going logically lower, we do not care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		if (src_rq->rt.highest_prio.next >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		    this_rq->rt.highest_prio.curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		 * We can potentially drop this_rq's lock in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		 * double_lock_balance, and another CPU could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		 * alter this_rq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		double_lock_balance(this_rq, src_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		 * We can only pull a task that is pushable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		 * on its rq, and no others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		p = pick_highest_pushable_task(src_rq, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		 * Do we have an RT task that preempts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		 * the to-be-scheduled task?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			WARN_ON(p == src_rq->curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 			WARN_ON(!task_on_rq_queued(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 			 * There's a chance that p is higher in priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			 * than what's currently running on its CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			 * This is just because p is waking up and hasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			 * had a chance to schedule. We only pull
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			 * p if it is lower in priority than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 			 * current task on the run queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 			if (p->prio < src_rq->curr->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 				goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			resched = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 			deactivate_task(src_rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 			set_task_cpu(p, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			activate_task(this_rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			 * We continue with the search, just in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			 * case there's an even higher prio task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			 * in another runqueue. (low likelihood
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			 * but possible)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		double_unlock_balance(this_rq, src_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	if (resched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		resched_curr(this_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  * If we are not running and we are not going to reschedule soon, we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)  * try to push tasks away now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) static void task_woken_rt(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	bool need_to_push = !task_running(rq, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 			    !test_tsk_need_resched(rq->curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 			    p->nr_cpus_allowed > 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 			    (rq->curr->nr_cpus_allowed < 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 			     rq->curr->prio <= p->prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	if (need_to_push)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		push_rt_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) /* Assumes rq->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) static void rq_online_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	if (rq->rt.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		rt_set_overload(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	__enable_runtime(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /* Assumes rq->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) static void rq_offline_rt(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (rq->rt.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		rt_clear_overload(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	__disable_runtime(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)  * When switching from the rt queue, we bring ourselves to a position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)  * where we might want to pull RT tasks from other runqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static void switched_from_rt(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	 * If there are other RT tasks then we will reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	 * and the scheduling of the other RT tasks will handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	 * the balancing. But if we are the last RT task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	 * we may need to handle the pulling of RT tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	 * now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	rt_queue_pull_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) void __init init_sched_rt_class(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 					GFP_KERNEL, cpu_to_node(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  * When switching a task to RT, we may overload the runqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)  * with RT tasks. In this case we try to push them off to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)  * other runqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static void switched_to_rt(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	 * If we are running, update the avg_rt tracking, as the running time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	 * will from now on be accounted into the latter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (task_current(rq, p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	 * If we are not running, we may need to preempt the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	 * running task. If that current running task is also an RT task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	 * then see if we can move to another run queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	if (task_on_rq_queued(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			rt_queue_push_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 			resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
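
/*
 * A minimal userspace sketch (illustrative only, not part of this file):
 * switched_to_rt() above runs when a task changes class to RT, for example
 * via sched_setscheduler(); the priority value 50 is just an assumption.
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 50 };

	/* Moving the calling task to SCHED_FIFO ends up in switched_to_rt(). */
	if (sched_setscheduler(0, SCHED_FIFO, &param))
		perror("sched_setscheduler");	/* typically needs CAP_SYS_NICE */
	return 0;
}
#endif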
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)  * Priority of the task has changed. This may cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)  * us to initiate a push or pull.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	if (!task_on_rq_queued(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	if (rq->curr == p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		 * If our priority decreases while running, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		 * may need to pull tasks to this runqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		if (oldprio < p->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			rt_queue_pull_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		 * If there's a higher priority task waiting to run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		 * then reschedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		if (p->prio > rq->rt.highest_prio.curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		/* For UP simply resched on drop of prio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		if (oldprio < p->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		 * This task is not running, but if its priority is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		 * higher than that of the current running task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		 * then reschedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		if (p->prio < rq->curr->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
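
/*
 * Note on the comparisons above: a lower p->prio value means a higher
 * priority. For an RT task, p->prio = MAX_RT_PRIO - 1 - rt_priority, so
 * with MAX_RT_PRIO == 100 an rt_priority of 50 maps to prio 49, and raising
 * rt_priority to 60 lowers prio to 39. Hence "oldprio < p->prio" detects a
 * priority drop, and "p->prio > rq->rt.highest_prio.curr" means a queued
 * task now outranks the running one.
 */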
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) #ifdef CONFIG_POSIX_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) static void watchdog(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	unsigned long soft, hard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	/* max may change after cur was read; this will be fixed next tick */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	soft = task_rlimit(p, RLIMIT_RTTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	if (soft != RLIM_INFINITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		unsigned long next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		if (p->rt.watchdog_stamp != jiffies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 			p->rt.timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 			p->rt.watchdog_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		if (p->rt.timeout > next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 						    p->se.sum_exec_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static inline void watchdog(struct rq *rq, struct task_struct *p) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) #endif
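
/*
 * A minimal userspace sketch (illustrative only, not part of this file):
 * the watchdog above enforces RLIMIT_RTTIME, a budget of CPU time in
 * microseconds that an RT task may consume without blocking. The limit
 * values below are arbitrary examples.
 */
#if 0
#define _GNU_SOURCE		/* RLIMIT_RTTIME on some libc versions */
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	/* Soft limit: 500 ms (SIGXCPU); hard limit: 1 s (SIGKILL). */
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };

	if (setrlimit(RLIMIT_RTTIME, &rl))
		perror("setrlimit");
	return 0;
}
#endif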
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)  * scheduler tick hitting a task of our scheduling class.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  * NOTE: This function can be called remotely by the tick offload that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)  * goes along full dynticks. Therefore no local assumption can be made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)  * and everything must be accessed through the passed-in @rq and @p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)  * parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	struct sched_rt_entity *rt_se = &p->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	update_curr_rt(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	watchdog(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	 * RR tasks need a special form of timeslice management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	 * FIFO tasks have no timeslices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	if (p->policy != SCHED_RR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	if (--p->rt.time_slice)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	p->rt.time_slice = sched_rr_timeslice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	 * Requeue to the end of the queue if we (and all of our ancestors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	 * are not the only element on the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	for_each_sched_rt_entity(rt_se) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		if (rt_se->run_list.prev != rt_se->run_list.next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 			requeue_task_rt(rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 			resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
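
/*
 * Note: p->rt.time_slice is decremented once per tick above, so the RR
 * quantum lasts sched_rr_timeslice ticks. With the default RR_TIMESLICE of
 * (100 * HZ / 1000) jiffies, i.e. 100 ms, and HZ == 250 (an example value),
 * a SCHED_RR task is requeued after 25 ticks.
 */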
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	 * Time slice is 0 for SCHED_FIFO tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	if (task->policy == SCHED_RR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		return sched_rr_timeslice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
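
/*
 * A minimal userspace sketch (illustrative only, not part of this file):
 * get_rr_interval_rt() backs the sched_rr_get_interval() syscall, which
 * reports the RR quantum as a timespec (and 0 for SCHED_FIFO tasks):
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR quantum: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif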
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) const struct sched_class rt_sched_class
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	__section("__rt_sched_class") = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	.enqueue_task		= enqueue_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	.dequeue_task		= dequeue_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	.yield_task		= yield_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	.check_preempt_curr	= check_preempt_curr_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	.pick_next_task		= pick_next_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	.put_prev_task		= put_prev_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	.set_next_task          = set_next_task_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	.balance		= balance_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	.select_task_rq		= select_task_rq_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	.set_cpus_allowed       = set_cpus_allowed_common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	.rq_online              = rq_online_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	.rq_offline             = rq_offline_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	.task_woken		= task_woken_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	.switched_from		= switched_from_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	.task_tick		= task_tick_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	.get_rr_interval	= get_rr_interval_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	.prio_changed		= prio_changed_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	.switched_to		= switched_to_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	.update_curr		= update_curr_rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) #ifdef CONFIG_UCLAMP_TASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	.uclamp_enabled		= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) #ifdef CONFIG_RT_GROUP_SCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  * Ensure that the real-time constraints are schedulable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static DEFINE_MUTEX(rt_constraints_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static inline int tg_has_rt_tasks(struct task_group *tg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	struct css_task_iter it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	 * Autogroups do not have RT tasks; see autogroup_create().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	if (task_group_is_autogroup(tg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	css_task_iter_start(&tg->css, 0, &it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	while (!ret && (task = css_task_iter_next(&it)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		ret |= rt_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	css_task_iter_end(&it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct rt_schedulable_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	struct task_group *tg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	u64 rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	u64 rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static int tg_rt_schedulable(struct task_group *tg, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	struct rt_schedulable_data *d = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	struct task_group *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	unsigned long total, sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	u64 period, runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	runtime = tg->rt_bandwidth.rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	if (tg == d->tg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 		period = d->rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		runtime = d->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	 * Cannot have more runtime than the period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	if (runtime > period && runtime != RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	 * Ensure we don't starve existing RT tasks if runtime turns zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (rt_bandwidth_enabled() && !runtime &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	total = to_ratio(period, runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	 * Nobody can have more than the global setting allows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	 * The sum of our children's runtime should not exceed our own.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	list_for_each_entry_rcu(child, &tg->children, siblings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 		runtime = child->rt_bandwidth.rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		if (child == d->tg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 			period = d->rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			runtime = d->rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		sum += to_ratio(period, runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	if (sum > total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) }
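
/*
 * Note: to_ratio() (defined in sched/core.c) expresses runtime/period as a
 * fixed-point fraction scaled by 2^BW_SHIFT (20). With the default global
 * settings of runtime = 950000 us and period = 1000000 us, the cap checked
 * above is floor(0.95 * 2^20) = 996147, i.e. 95% of the CPU's bandwidth.
 */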
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	struct rt_schedulable_data data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		.tg = tg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		.rt_period = period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		.rt_runtime = runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static int tg_set_rt_bandwidth(struct task_group *tg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		u64 rt_period, u64 rt_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	 * Disallowing RT runtime for the root group is BAD; it would prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	 * the kernel from creating (and/or operating) RT threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	if (tg == &root_task_group && rt_runtime == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	/* A zero period doesn't make any sense. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	if (rt_period == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	 * Bound the quota to defend against overflow during the bandwidth shift.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	mutex_lock(&rt_constraints_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	err = __rt_schedulable(tg, rt_period, rt_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	tg->rt_bandwidth.rt_runtime = rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		struct rt_rq *rt_rq = tg->rt_rq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		rt_rq->rt_runtime = rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	mutex_unlock(&rt_constraints_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
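
/*
 * With CONFIG_RT_GROUP_SCHED this is reached from the cgroup-v1 cpu
 * controller, e.g. (the cgroup path "mygroup" is only an example):
 *
 *   echo 300000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *
 * which grants tasks in that group 300 ms of RT runtime per period on each
 * CPU's rt_rq, subject to the __rt_schedulable() checks above.
 */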
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	u64 rt_runtime, rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (rt_runtime_us < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		rt_runtime = RUNTIME_INF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) long sched_group_rt_runtime(struct task_group *tg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	u64 rt_runtime_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	do_div(rt_runtime_us, NSEC_PER_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	return rt_runtime_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	u64 rt_runtime, rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	rt_period = rt_period_us * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	rt_runtime = tg->rt_bandwidth.rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) long sched_group_rt_period(struct task_group *tg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	u64 rt_period_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	do_div(rt_period_us, NSEC_PER_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	return rt_period_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) static int sched_rt_global_constraints(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	mutex_lock(&rt_constraints_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	ret = __rt_schedulable(NULL, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	mutex_unlock(&rt_constraints_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	/* Don't accept realtime tasks when there is no way for them to run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) #else /* !CONFIG_RT_GROUP_SCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) static int sched_rt_global_constraints(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		rt_rq->rt_runtime = global_rt_runtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) #endif /* CONFIG_RT_GROUP_SCHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static int sched_rt_global_validate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (sysctl_sched_rt_period <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		 ((u64)sysctl_sched_rt_runtime *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			NSEC_PER_USEC > max_rt_runtime)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
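
/*
 * Note: with the defaults (sysctl_sched_rt_period = 1000000 us,
 * sysctl_sched_rt_runtime = 950000 us) both checks above pass: the runtime
 * does not exceed the period, and 950000 us * NSEC_PER_USEC = 9.5e8 ns is
 * far below max_rt_runtime (MAX_BW, roughly 2^44 ns assuming BW_SHIFT is
 * 20).
 */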
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) static void sched_rt_do_global(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 		size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	int old_period, old_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	static DEFINE_MUTEX(mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	mutex_lock(&mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	old_period = sysctl_sched_rt_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	old_runtime = sysctl_sched_rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	if (!ret && write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		ret = sched_rt_global_validate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 			goto undo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		ret = sched_dl_global_validate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 			goto undo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 		ret = sched_rt_global_constraints();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 			goto undo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		sched_rt_do_global();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		sched_dl_do_global();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	}
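	/*
	 * proc_dointvec() above has already stored the new values, so on a
	 * failed validation the undo label restores the old ones; the if (0)
	 * keeps this block unreachable on the success path.
	 */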
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	if (0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) undo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		sysctl_sched_rt_period = old_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		sysctl_sched_rt_runtime = old_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	mutex_unlock(&mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
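
/*
 * This handler services /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us. For example:
 *
 *   sysctl -w kernel.sched_rt_runtime_us=-1
 *
 * sets RUNTIME_INF and disables RT throttling entirely; use with care,
 * since a runaway RT task can then monopolize a CPU.
 */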
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	static DEFINE_MUTEX(mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	mutex_lock(&mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	 * Make sure that internally we keep jiffies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	 * Also, writing zero resets the timeslice to the default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	if (!ret && write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		sched_rr_timeslice =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	mutex_unlock(&mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
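
/*
 * The RR counterpart is /proc/sys/kernel/sched_rr_timeslice_ms; writing 0
 * (e.g. "sysctl -w kernel.sched_rr_timeslice_ms=0") restores the
 * RR_TIMESLICE default, per the reset logic above.
 */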
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) #ifdef CONFIG_SCHED_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) void print_rt_stats(struct seq_file *m, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	rt_rq_iter_t iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	struct rt_rq *rt_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		print_rt_rq(m, cpu, rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) #endif /* CONFIG_SCHED_DEBUG */