// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

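/*
 * An entity is queued on a dl_rq iff its rb_node is linked into the
 * rb-tree; the node is cleared with RB_CLEAR_NODE() at dequeue time
 * (see e.g. dequeue_pushable_dl_task() below for the same pattern),
 * which is what makes this test work.
 */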
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

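/*
 * When a task is boosted through a deadline-aware rt-mutex, dl_se->pi_se
 * points to the scheduling entity whose parameters are donated to it;
 * it points back to the entity itself when no boosting is in effect,
 * which is exactly what is_dl_boosted() tests.
 */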
#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

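/*
 * Number of CPUs in this root domain's span that are currently active;
 * the cpumask_subset() test below is just a fast path for the common
 * case where the whole span is active.
 */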
static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");

	if (cpumask_subset(rd->span, cpu_active_mask))
		return cpumask_weight(rd->span);

	cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}

static inline unsigned long __dl_bw_capacity(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	unsigned long cap = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cap += capacity_orig_of(i);

	return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than the rd's \Sum CPU capacity.
 */
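/*
 * Fast path: with symmetric CPU capacities each CPU contributes
 * SCHED_CAPACITY_SCALE (== 1 << SCHED_CAPACITY_SHIFT), so the total
 * capacity reduces to a shift of the active CPU count.
 */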
static inline unsigned long dl_bw_capacity(int i)
{
	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
	} else {
		return __dl_bw_capacity(i);
	}
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

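/*
 * running_bw accumulates the bandwidth of the entities that are
 * currently "ACTIVE" on this rq (see the state diagram further down),
 * while this_bw also includes blocked entities whose "0-lag time" has
 * not passed yet; hence running_bw <= this_bw must hold at all times,
 * which is what the SCHED_WARN_ON()s below check.
 */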
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |      ACTIVE      |
 *          +------------------>+    contending    |
 *          |  add_running_bw   |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |  t >= 0-lag    |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                |  sub_running_bw|      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |             t < 0-lag  |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          |  sub_running_bw   |      ACTIVE      |
 *          +-------------------+                  |
 *             inactive timer   |  non contending  |
 *             fired            +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
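/*
 * For reference, the "0-lag time" is the instant at which the lag of
 * the task (service it would have received from an ideal fluid server
 * minus service actually received) becomes zero; with the CBS
 * parameters used here it evaluates to:
 *
 *	t_0-lag = deadline - runtime * dl_period / dl_runtime
 *
 * which is exactly what task_non_contending() computes below.
 */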
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code.
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer.
	 */
	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
		if (dl_task(p))
			sub_running_bw(dl_se, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(&p->dl, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);
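
/*
 * Push and pull operations are run as balance callbacks, i.e. they are
 * deferred via queue_balance_callback() rather than executed inline,
 * since they may need to drop and re-acquire this rq's lock (as done
 * via double_lock_balance() in dl_task_offline_migration() below).
 */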

static inline void deadline_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	struct dl_bw *dl_b;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);

		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/*
	 * And we finally need to fixup root_domain(s) bandwidth accounting,
	 * since p is still hanging out in the old (now moved to default) root
	 * domain.
	 */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
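/*
 * Purely illustrative numbers: an entity with dl_runtime = 10ms and
 * dl_deadline = 30ms signalling a new instance at t = 100ms gets
 * deadline = 130ms and runtime = 10ms from the assignments below.
 */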
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(is_dl_boosted(dl_se));
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setattr().
 */
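/*
 * Illustrative numbers: with dl_runtime = 10ms, dl_period = 100ms and
 * runtime = -25ms after an overrun, the replenishment loop below moves
 * the deadline three periods ahead and leaves runtime = 5ms.
 */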
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) printk_deferred_once("sched: DL replenish lagged too much\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dl_se->runtime = pi_of(dl_se)->dl_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (dl_se->dl_yielded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dl_se->dl_yielded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (dl_se->dl_throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dl_se->dl_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
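/*
 * A worked example of the replenishment loop above, with made-up
 * parameters: dl_runtime = 10ms, dl_period = 100ms, and an entity that
 * overran down to runtime = -25ms. The loop then iterates three times:
 *
 *   runtime:  -25ms -> -15ms -> -5ms -> 5ms
 *   deadline: postponed by 3 * dl_period = 300ms
 *
 * so the bandwidth consumed over the postponed window still stays
 * within dl_runtime / dl_period.
 */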
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * Here we check if --at time t-- an entity (which is probably being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * [re]activated or, in general, enqueued) can use its remaining runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * and its current deadline _without_ exceeding the bandwidth it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * assigned (function returns true if it can't). We are in fact applying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * one of the CBS rules: when a task wakes up, if the residual runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * over residual deadline fits within the allocated bandwidth, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * can keep the current (absolute) deadline and residual budget without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * disrupting the schedulability of the system. Otherwise, we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * refill the runtime and set the deadline a period in the future,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * because keeping the current (absolute) deadline of the task would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * result in breaking guarantees promised to other tasks (refer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * Documentation/scheduler/sched-deadline.rst for more information).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * This function returns true if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * runtime / (deadline - t) > dl_runtime / dl_deadline ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * IOW we can't recycle current parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * Notice that the bandwidth check is done against the deadline. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * a task with deadline equal to period, this is the same as using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * dl_period instead of dl_deadline in the equation above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u64 left, right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * left and right are the two sides of the equation above,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * after a bit of shuffling to use multiplications instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * of divisions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Note that none of the time values involved in the two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * multiplications are absolute: dl_deadline and dl_runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * are the relative deadline and the maximum runtime of each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * instance, runtime is the runtime left for the last instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * and (deadline - t), since t is rq->clock, is the time left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * to the (absolute) deadline. Even if overflowing the u64 type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * is very unlikely to occur in both cases, here we scale down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * as we want to avoid that risk altogether. Scaling down by 10 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * (DL_SCALE) reduces the granularity to 1us. We are fine with it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * since this is only a true/false check and, anyway, thinking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * of anything below microseconds resolution is actually fiction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * (but still we want to give the user that illusion >;).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) right = ((dl_se->deadline - t) >> DL_SCALE) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) (pi_of(dl_se)->dl_runtime >> DL_SCALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return dl_time_before(right, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
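/*
 * A numeric sketch of the check above (made-up values, DL_SCALE shifts
 * omitted): dl_runtime = 10ms, dl_deadline = 100ms, residual
 * runtime = 5ms and 20ms left to the current absolute deadline:
 *
 *   left  = dl_deadline * runtime        = 100ms * 5ms
 *   right = (deadline - t) * dl_runtime  =  20ms * 10ms
 *
 * left > right, i.e. 5/20 (25%) > 10/100 (10%), so the function
 * returns true: reusing the current parameters would exceed the
 * reserved bandwidth.
 */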
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Revised wakeup rule [1]: For self-suspending tasks, rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * re-initializing the task's runtime and deadline, the revised wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * rule adjusts the task's runtime to prevent the task from overrunning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * its density.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * Reasoning: a task may overrun the density if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * runtime / (deadline - t) > dl_runtime / dl_deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Therefore, runtime can be adjusted to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * runtime = (dl_runtime / dl_deadline) * (deadline - t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * This way, the runtime is set to the maximum budget the task can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * consume without exceeding its allowed density.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) u64 laxity = dl_se->deadline - rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * If the task has deadline < period, and the deadline is in the past,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * it should already be throttled before this check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * See update_dl_entity() comments for further details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
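/*
 * Example with made-up parameters: dl_runtime = 10ms and
 * dl_deadline = 100ms give dl_density = 0.1 (stored shifted left by
 * BW_SHIFT). A task waking up with laxity = 50ms gets its runtime
 * trimmed to 0.1 * 50ms = 5ms, so runtime / (deadline - t) never
 * exceeds dl_runtime / dl_deadline.
 */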
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * Regarding the deadline, a task with implicit deadline has a relative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * deadline == relative period. A task with constrained deadline has a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * relative deadline <= relative period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * We support constrained deadline tasks. However, there are some restrictions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * applied only for tasks which do not have an implicit deadline. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * update_dl_entity() to know more about such restrictions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * dl_is_implicit() returns true if the task has an implicit deadline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return dl_se->dl_deadline == dl_se->dl_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * When a deadline entity is placed in the runqueue, its runtime and deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * might need to be updated. This is done by a CBS wake up rule. There are two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * different rules: 1) the original CBS; and 2) the Revised CBS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * When the task is starting a new period, the Original CBS is used. In this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * case, the runtime is replenished and a new absolute deadline is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * When a task is queued before the beginning of the next period, using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * remaining runtime and deadline could cause the entity to overflow; see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * dl_entity_overflow() for more about runtime overflow. When such a case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * is detected, the runtime and deadline need to be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * If the task has an implicit deadline, i.e., deadline == period, the Original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * CBS is applied: the runtime is replenished and a new absolute deadline is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * set, as in the previous cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * However, the Original CBS does not work properly for tasks with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * deadline < period, which are said to have a constrained deadline. By
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * applying the Original CBS, a constrained deadline task would be able to run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * runtime/deadline in a period. With deadline < period, the task would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * overrun the runtime/period allowed bandwidth, breaking the admission test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * In order to prevent this misbehavior, the Revised CBS is used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * constrained deadline tasks when a runtime overflow is detected. In the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Revised CBS, rather than replenishing & setting a new absolute deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * the remaining runtime of the task is reduced to avoid runtime overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * Please refer to the comments of the update_dl_revised_wakeup() function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * for more about the Revised CBS rule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static void update_dl_entity(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct rq *rq = rq_of_dl_rq(dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dl_entity_overflow(dl_se, rq_clock(rq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (unlikely(!dl_is_implicit(dl_se) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) !is_dl_boosted(dl_se))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) update_dl_revised_wakeup(dl_se, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dl_se->runtime = pi_of(dl_se)->dl_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
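/*
 * To make the branch above concrete (hypothetical scenario): a
 * constrained task (dl_deadline < dl_period) that wakes up before its
 * old absolute deadline, but whose residual runtime fails the
 * dl_entity_overflow() check, is handled by update_dl_revised_wakeup()
 * and only gets a trimmed runtime. An implicit task in the same
 * situation falls through instead and gets a full replenishment plus a
 * deadline one relative deadline in the future.
 */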
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
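/*
 * E.g., with made-up parameters dl_period = 100ms and
 * dl_deadline = 70ms, an absolute deadline of t0 + 70ms (instance
 * activated at t0) maps to (t0 + 70ms) - 70ms + 100ms = t0 + 100ms,
 * i.e. the start of the instance following the current one.
 */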
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * If the entity depleted all its runtime, and if we want it to sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * while waiting for some new execution time to become available, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * set the bandwidth replenishment timer to the replenishment instant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * and try to activate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Notice that it is important for the caller to know if the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * actually started or not (i.e., the replenishment instant is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * the future or in the past).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int start_dl_timer(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct sched_dl_entity *dl_se = &p->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct hrtimer *timer = &dl_se->dl_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct rq *rq = task_rq(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ktime_t now, act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) lockdep_assert_held(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * We want the timer to fire at the deadline, accounting for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * fact that the deadline is expressed in rq->clock time, while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * hrtimer works on its own time base.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) act = ns_to_ktime(dl_next_period(dl_se));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) now = hrtimer_cb_get_time(timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) delta = ktime_to_ns(now) - rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) act = ktime_add_ns(act, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * If the expiry time already passed, e.g., because the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * chosen as the deadline is too small, don't even try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * start the timer in the past!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (ktime_us_delta(act, now) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * !enqueued will guarantee another callback; even if one is already in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * progress. This ensures a balanced {get,put}_task_struct().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * The race against __run_timer() clearing the enqueued state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * harmless because we're holding task_rq()->lock, therefore the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * expiring after we've done the check will wait on its task_rq_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * and observe our state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!hrtimer_is_queued(timer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) get_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
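/*
 * The delta adjustment in start_dl_timer() compensates for the offset
 * between the two time bases. Sketch with assumed readings: if
 * rq_clock(rq) = 1000us while hrtimer_cb_get_time() returns 1003us,
 * then delta = 3us, and a replenishment instant of 1500us in rq->clock
 * terms is armed at 1503us on the hrtimer base, firing at the intended
 * instant.
 */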
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * This is the bandwidth enforcement timer callback. If here, we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * a task is not on its dl_rq, since the fact that the timer was running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * means the task is throttled and needs a runtime replenishment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * However, what we actually do depends on whether the task is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * active (it is on its rq) or has been removed from there by a call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * dequeue_task_dl(). In the former case we must issue the runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * replenishment and add the task back to the dl_rq; in the latter, we just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * do nothing but clear dl_throttled, so that runtime and deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * updating (and the queueing back to dl_rq) will be done by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * next call to enqueue_task_dl().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct sched_dl_entity *dl_se = container_of(timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct sched_dl_entity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) dl_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct task_struct *p = dl_task_of(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct rq_flags rf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) rq = task_rq_lock(p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * The task might have changed its scheduling policy to something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * different than SCHED_DEADLINE (through switched_from_dl()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!dl_task(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * The task might have been boosted by someone else and might be in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * boosting/deboosting path; in that case it's not throttled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (is_dl_boosted(dl_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Spurious timer due to start_dl_timer() race; or we already received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * a replenishment from rt_mutex_setprio().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!dl_se->dl_throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) sched_clock_tick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * If the throttle happened during sched-out, like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * deactivate_task()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * dequeue_task_dl()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * update_curr_dl()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * start_dl_timer()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * __dequeue_task_dl()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * prev->on_rq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * We can be both throttled and !queued. Replenish the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * but do not enqueue -- wait for our wakeup to do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (!task_on_rq_queued(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) replenish_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (unlikely(!rq->online)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * If the runqueue is no longer available, migrate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * task elsewhere. This necessarily changes rq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) lockdep_unpin_lock(&rq->lock, rf.cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) rq = dl_task_offline_migration(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rf.cookie = lockdep_pin_lock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * Now that the task has been migrated to the new RQ and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * have that locked, proceed as normal and enqueue the task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (dl_task(rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) check_preempt_curr_dl(rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * Queueing this task back might have overloaded rq, check if we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * to kick someone away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (has_pushable_dl_tasks(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Nothing relies on rq->lock after this, so it's safe to drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * rq->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) rq_unpin_lock(rq, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) push_dl_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) rq_repin_lock(rq, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) task_rq_unlock(rq, p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * This can free the task_struct, including this hrtimer, do not touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * anything related to that after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void init_dl_task_timer(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct hrtimer *timer = &dl_se->dl_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) timer->function = dl_task_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * During the activation, CBS checks if it can reuse the current task's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * runtime and period. If the deadline of the task is in the past, CBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * cannot use the runtime, and so it replenishes the task. This rule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * works fine for implicit deadline tasks (deadline == period), and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * CBS was designed for implicit deadline tasks. However, a task with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * constrained deadline (deadline < period) might be awakened after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * deadline, but before the next period. In this case, replenishing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * task would allow it to run with a bandwidth of runtime / deadline. As
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * in this case deadline < period, CBS would let the task run for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * than its admitted runtime / period bandwidth. In a very loaded system,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * this can cause a domino effect, making other tasks miss their deadlines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * To avoid this problem, in the activation of a constrained deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * task after the deadline but before the next period, throttle the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * task and set the replenishing timer to the beginning of the next period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * unless it is boosted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct task_struct *p = dl_task_of(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dl_se->dl_throttled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (dl_se->runtime > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) dl_se->runtime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
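/*
 * Timeline sketch for the check above (illustrative numbers):
 * dl_deadline = 70ms, dl_period = 100ms, instance activated at t = 0.
 * A wakeup at t = 80ms lands after the deadline (70ms) but before the
 * next period (100ms): the task is throttled with zero runtime and the
 * replenishment timer fires at t = 100ms, instead of letting the task
 * burn a fresh runtime in the remaining 20ms.
 */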
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return (dl_se->runtime <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * This function implements the GRUB accounting rule:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * according to the GRUB reclaiming algorithm, the runtime is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * not decreased as "dq = -dt", but as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * where u is the utilization of the task, Umax is the maximum reclaimable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * utilization, Uinact is the (per-runqueue) inactive utilization, computed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * as the difference between the "total runqueue utilization" and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * runqueue active utilization, and Uextra is the (per runqueue) extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * reclaimable utilization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * multiplied by 2^BW_SHIFT, the result has to be shifted right by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * BW_SHIFT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Since delta is a 64 bit variable, for an overflow to occur its value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * So, overflow is not an issue here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) u64 u_act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * we compare u_inact + rq->dl.extra_bw with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * u_inact + rq->dl.extra_bw can be larger than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * leading to wrong results)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) u_act = u_act_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return (delta * u_act) >> BW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
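/*
 * Numeric sketch of grub_reclaim() (made-up utilizations, all stored
 * shifted left by BW_SHIFT in the code): u_act_min = 0.25,
 * u_inact = 0.1, extra_bw = 0.2. Since 0.1 + 0.2 <= 1 - 0.25, the else
 * branch applies and u_act = 1 - 0.1 - 0.2 = 0.7, so a task that ran
 * for delta = 10ms is charged only 7ms of runtime, reclaiming the
 * bandwidth left unused by the other reservations.
 */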
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * Update the current task's runtime statistics (provided it is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * a -deadline task and has not been removed from the dl_rq).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static void update_curr_dl(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct task_struct *curr = rq->curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) struct sched_dl_entity *dl_se = &curr->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) u64 delta_exec, scaled_delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) int cpu = cpu_of(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (!dl_task(curr) || !on_dl_rq(dl_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * Consumed budget is computed considering the time as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * observed by schedulable tasks (excluding time spent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * in hardirq context, etc.). Deadlines are instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * computed using hard walltime. This seems to be the more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * natural solution, but the full ramifications of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * approach need further study.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) now = rq_clock_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) delta_exec = now - curr->se.exec_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (unlikely((s64)delta_exec <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (unlikely(dl_se->dl_yielded))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) goto throttle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) schedstat_set(curr->se.statistics.exec_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) max(curr->se.statistics.exec_max, delta_exec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) curr->se.sum_exec_runtime += delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) account_group_exec_runtime(curr, delta_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) curr->se.exec_start = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) cgroup_account_cputime(curr, delta_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (dl_entity_is_special(dl_se))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * For tasks that participate in GRUB, we implement GRUB-PA: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * spare reclaimed bandwidth is used to clock down frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * For the others, we still need to scale reservation parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * according to current frequency and CPU maximum capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) scaled_delta_exec = grub_reclaim(delta_exec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) &curr->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) unsigned long scale_freq = arch_scale_freq_capacity(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) scaled_delta_exec = cap_scale(delta_exec, scale_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dl_se->runtime -= scaled_delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) throttle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) dl_se->dl_throttled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* If requested, inform the user about runtime overruns. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (dl_runtime_exceeded(dl_se) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dl_se->dl_overrun = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) __dequeue_task_dl(rq, curr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (!is_leftmost(curr, &rq->dl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * Because -- for now -- we share the rt bandwidth, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * account our runtime there too, otherwise actual rt tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * would be able to exceed the shared quota.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * Account to the root rt group for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * The solution we're working towards is having the RT groups scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * using deadline servers -- however there's a few nasties to figure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * out before that can happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (rt_bandwidth_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct rt_rq *rt_rq = &rq->rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) raw_spin_lock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * We'll let actual RT tasks worry about the overflow here, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * have our own CBS to keep us in line; only account when RT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * bandwidth is relevant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (sched_rt_bandwidth_account(rt_rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) rt_rq->rt_time += delta_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) raw_spin_unlock(&rt_rq->rt_runtime_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
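/*
 * Example of the non-reclaiming scaling in update_curr_dl() (assumed
 * capacity values): with scale_freq = 512 out of 1024 (CPU running at
 * half its max frequency) and scale_cpu = 1024, a wall-clock
 * delta_exec of 4ms is accounted as 4ms * 512/1024 = 2ms of runtime,
 * charging the reservation for the work actually done rather than the
 * time elapsed.
 */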
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct sched_dl_entity *dl_se = container_of(timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct sched_dl_entity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) inactive_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct task_struct *p = dl_task_of(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct rq_flags rf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) rq = task_rq_lock(p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) sched_clock_tick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (!dl_task(p) || p->state == TASK_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) dl_se->dl_non_contending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) raw_spin_lock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) raw_spin_unlock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) __dl_clear_params(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (dl_se->dl_non_contending == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) sub_running_bw(dl_se, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) dl_se->dl_non_contending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) task_rq_unlock(rq, p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct hrtimer *timer = &dl_se->inactive_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) timer->function = inactive_task_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct rq *rq = rq_of_dl_rq(dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (dl_rq->earliest_dl.curr == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dl_rq->earliest_dl.curr = deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct rq *rq = rq_of_dl_rq(dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * Since we may have removed our earliest (and/or next earliest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * task we must recompute them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!dl_rq->dl_nr_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dl_rq->earliest_dl.curr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) dl_rq->earliest_dl.next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) cpudl_clear(&rq->rd->cpudl, rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct rb_node *leftmost = dl_rq->root.rb_leftmost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct sched_dl_entity *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) dl_rq->earliest_dl.curr = entry->deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int prio = dl_task_of(dl_se)->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) u64 deadline = dl_se->deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) WARN_ON(!dl_prio(prio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) dl_rq->dl_nr_running++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) add_nr_running(rq_of_dl_rq(dl_rq), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) inc_dl_deadline(dl_rq, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) inc_dl_migration(dl_se, dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int prio = dl_task_of(dl_se)->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) WARN_ON(!dl_prio(prio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) WARN_ON(!dl_rq->dl_nr_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dl_rq->dl_nr_running--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) sub_nr_running(rq_of_dl_rq(dl_rq), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) dec_dl_deadline(dl_rq, dl_se->deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) dec_dl_migration(dl_se, dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct rb_node **link = &dl_rq->root.rb_root.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct sched_dl_entity *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int leftmost = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) while (*link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) parent = *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) entry = rb_entry(parent, struct sched_dl_entity, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (dl_time_before(dl_se->deadline, entry->deadline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) link = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) link = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) leftmost = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) rb_link_node(&dl_se->rb_node, parent, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) inc_dl_tasks(dl_se, dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (RB_EMPTY_NODE(&dl_se->rb_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) RB_CLEAR_NODE(&dl_se->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) dec_dl_tasks(dl_se, dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) BUG_ON(on_dl_rq(dl_se));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * If this is a wakeup or a new instance, the scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * parameters of the task might need updating. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * we want a replenishment of its runtime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (flags & ENQUEUE_WAKEUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) task_contending(dl_se, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) update_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) } else if (flags & ENQUEUE_REPLENISH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) replenish_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) } else if ((flags & ENQUEUE_RESTORE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dl_time_before(dl_se->deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) setup_new_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) __enqueue_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) __dequeue_dl_entity(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (is_dl_boosted(&p->dl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * Because of delays in the detection of the overrun of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * thread's runtime, it might be the case that a thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * goes to sleep on an rt mutex with negative runtime. As
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * a consequence, the thread will be throttled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * While waiting for the mutex, this thread can also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * boosted via PI, resulting in a thread that is throttled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * and boosted at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * In this case, the boost overrides the throttle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (p->dl.dl_throttled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * The replenish timer needs to be canceled. No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * problem if it fires concurrently: boosted threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * are ignored in dl_task_timer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) hrtimer_try_to_cancel(&p->dl.dl_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) p->dl.dl_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) } else if (!dl_prio(p->normal_prio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * Special case in which we have a !SCHED_DEADLINE task that is going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return to its original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * scheduling class after this. If it has been throttled, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * clear the flag, otherwise the task may wake up as throttled after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * being boosted again with no means to replenish the runtime and clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * the throttle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) p->dl.dl_throttled = 0;
		/*
		 * In this branch is_dl_boosted() is known to be false, so the
		 * old BUG_ON(!is_dl_boosted(...) || ...) fired unconditionally;
		 * only the REPLENISH expectation is worth checking, and a
		 * one-time warning is enough.
		 */
		if (!(flags & ENQUEUE_REPLENISH))
			printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
					     task_pid_nr(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * Check if a constrained deadline task was activated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * after the deadline but before the next period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * If that is the case, the task will be throttled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * the replenishment timer will be set to the next period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dl_check_constrained_dl(&p->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) add_rq_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) add_running_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it is now on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on whether
	 * the task is on the runqueue (it depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (flags & ENQUEUE_WAKEUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) task_contending(&p->dl, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) enqueue_dl_entity(&p->dl, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) enqueue_pushable_dl_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
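/*
 * Common dequeue path: take the entity off the EDF rb-tree and off the
 * pushable-tasks tree. Besides dequeue_task_dl() below, this is also
 * used on the throttling path in update_curr_dl().
 */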
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) dequeue_dl_entity(&p->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dequeue_pushable_dl_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) update_curr_dl(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) __dequeue_task_dl(rq, p, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) sub_running_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) sub_rq_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /*
	 * This check allows us to start the inactive timer (or to immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * decrease the active utilization, if needed) in two cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * when the task blocks and when it is terminating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * (p->state == TASK_DEAD). We can handle the two cases in the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * way, because from GRUB's point of view the same thing is happening
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * (the task moves from "active contending" to "active non contending"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * or "inactive")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (flags & DEQUEUE_SLEEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) task_non_contending(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * Yield task semantic for -deadline tasks is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) *
 * get off the CPU until our next instance, with
 * a new runtime. This is of little use now, since we
 * don't have a bandwidth reclaiming mechanism. Anyway,
 * bandwidth reclaiming is planned for the future, and
 * yield_task_dl will indicate that some spare budget
 * is available for other task instances to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) */
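/*
 * Purely illustrative (userspace view, not part of this file): a typical
 * -deadline task finishes each instance early via sched_yield():
 *
 *	while (!done) {
 *		do_instance_work();
 *		sched_yield();	// give up the leftover runtime until
 *				// the next period replenishes it
 *	}
 *
 * and ends up here through the sched_yield() syscall.
 */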
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void yield_task_dl(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * We make the task go to sleep until its current deadline by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and give it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * new scheduling parameters (thanks to dl_yielded=1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) rq->curr->dl.dl_yielded = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) update_curr_dl(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * Tell update_rq_clock() that we've just updated,
	 * so we don't do a microscopic update in schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * and double the fastpath cost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) rq_clock_skip_update(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static int find_later_rq(struct task_struct *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) struct task_struct *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) bool select_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (sd_flag != SD_BALANCE_WAKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) curr = READ_ONCE(rq->curr); /* unlocked access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * If we are dealing with a -deadline task, we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * decide where to wake it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * If it has a later deadline and the current task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * on this rq can't move (provided the waking task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * can!) we prefer to send it somewhere else. On the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * other hand, if it has a shorter deadline, we
	 * try to make it stay here; it might be important.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) select_rq = unlikely(dl_task(curr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) (curr->nr_cpus_allowed < 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) !dl_entity_preempt(&p->dl, &curr->dl)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) p->nr_cpus_allowed > 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * Take the capacity of the CPU into account to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * ensure it fits the requirement of the task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (static_branch_unlikely(&sched_asym_cpucapacity))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) select_rq |= !dl_task_fits_capacity(p, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (select_rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int target = find_later_rq(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (target != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) (dl_time_before(p->dl.deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) cpu_rq(target)->dl.earliest_dl.curr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) (cpu_rq(target)->dl.dl_nr_running == 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (p->state != TASK_WAKING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) rq = task_rq(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * Since p->state == TASK_WAKING, set_task_cpu() has been called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * from try_to_wake_up(). Hence, p->pi_lock is locked, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * rq->lock is not... So, lock it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) raw_spin_lock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (p->dl.dl_non_contending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) update_rq_clock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) sub_running_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) p->dl.dl_non_contending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * If the timer handler is currently running and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * will not touch the rq's active utilization,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * so we are still safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) sub_rq_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) raw_spin_unlock(&rq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /*
	 * Current can't be migrated, so rescheduling would be useless;
	 * let's hope p can move out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (rq->curr->nr_cpus_allowed == 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * p is migratable, so let's not schedule it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * see if it is pushed or pulled somewhere else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (p->nr_cpus_allowed != 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) cpudl_find(&rq->rd->cpudl, p, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * This is OK, because current is on_cpu, which avoids it being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * picked for load-balance and preemption/IRQs are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * disabled avoiding further scheduler activity on it and we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * not yet started the picking loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) rq_unpin_lock(rq, rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) pull_dl_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) rq_repin_lock(rq, rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return sched_stop_runnable(rq) || sched_dl_runnable(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * Only called when both the current and waking task are -deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * In the unlikely case current and p have the same deadline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * let us try to decide what's the best thing to do...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if ((p->dl.deadline == rq->curr->dl.deadline) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) !test_tsk_need_resched(rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) check_preempt_equal_dl(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) #ifdef CONFIG_SCHED_HRTICK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) hrtick_start(rq, p->dl.runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) #else /* !CONFIG_SCHED_HRTICK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
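/*
 * @p is becoming the running task: stamp the start of its execution
 * window, take it off the pushable tree (the running task cannot be
 * pushed away) and, if @p has just been picked (@first), re-arm the
 * hrtick and queue a push attempt for the tasks left behind.
 */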
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) p->se.exec_start = rq_clock_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /* You can't push away the running task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) dequeue_pushable_dl_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (hrtick_enabled(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) start_hrtick_dl(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (rq->curr->sched_class != &dl_sched_class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) deadline_queue_push_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
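/*
 * EDF pick: the leftmost node of the cached rb-tree is the entity with
 * the earliest deadline, so picking is a cheap cached-leftmost lookup.
 */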
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct dl_rq *dl_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) struct rb_node *left = rb_first_cached(&dl_rq->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (!left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return rb_entry(left, struct sched_dl_entity, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static struct task_struct *pick_next_task_dl(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct sched_dl_entity *dl_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct dl_rq *dl_rq = &rq->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!sched_dl_runnable(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) dl_se = pick_next_dl_entity(rq, dl_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) BUG_ON(!dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) p = dl_task_of(dl_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) set_next_task_dl(rq, p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) update_curr_dl(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) enqueue_pushable_dl_task(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * scheduler tick hitting a task of our scheduling class.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * NOTE: This function can be called remotely by the tick offload that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @p parameters
 * passed in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) update_curr_dl(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * Even when we have runtime, update_curr_dl() might have resulted in us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * not being the leftmost task anymore. In that case NEED_RESCHED will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * be set and schedule() will start a new hrtick for the next task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) is_leftmost(p, &rq->dl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) start_hrtick_dl(rq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static void task_fork_dl(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
	/*
	 * SCHED_DEADLINE tasks cannot fork; this is enforced in
	 * sched_fork(), so there is nothing to do here.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
/* Retry the rq-finding algorithms below at most three times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #define DL_MAX_TRIES 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
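/*
 * Can @p be pushed to @cpu? It must not be running right now, and @cpu
 * must be in its affinity mask.
 */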
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (!task_running(rq, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) cpumask_test_cpu(cpu, p->cpus_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
/*
 * Return the earliest-deadline pushable task of @rq that can run on
 * @cpu, or NULL if there is none:
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct task_struct *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (!has_pushable_dl_tasks(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
	while (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
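/*
 * Find a CPU that @task could be pushed to, preferring (roughly, see
 * below): the CPU @task last ran on, then this CPU when it shares a
 * wake-affine domain, then the first suitable CPU in each domain span,
 * and finally any CPU in later_mask; -1 if no suitable CPU exists.
 */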
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static int find_later_rq(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct sched_domain *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) int this_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) int cpu = task_cpu(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Make sure the mask is initialized first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (unlikely(!later_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (task->nr_cpus_allowed == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * We have to consider system topology and task affinity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * first, then we can look for a suitable CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
	/*
	 * If we are here, some targets have been found, including
	 * the most suitable one: among the runqueues whose current
	 * tasks have a later deadline than our task's, the rq whose
	 * deadline is the latest.
	 *
	 * Now we check how well this matches the task's
	 * affinity and the system topology.
	 *
	 * The last CPU where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (cpumask_test_cpu(cpu, later_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /*
	 * Check whether this_cpu is to be skipped (i.e., it is
	 * not in the mask).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (!cpumask_test_cpu(this_cpu, later_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) this_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) for_each_domain(cpu, sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (sd->flags & SD_WAKE_AFFINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) int best_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * If possible, preempting this_cpu is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * cheaper than migrating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (this_cpu != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) best_cpu = cpumask_first_and(later_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) sched_domain_span(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /*
			 * Last chance: if a CPU that is in both later_mask
			 * and the current sd span is valid, that becomes our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) * choice. Of course, the latest possible CPU is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * already under consideration through later_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (best_cpu < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return best_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /*
	 * At this point, all our guesses failed: we just return
	 * 'something' and let the caller sort things out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (this_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) cpu = cpumask_any(later_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (cpu < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /* Locks the rq it finds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct rq *later_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) for (tries = 0; tries < DL_MAX_TRIES; tries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) cpu = find_later_rq(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if ((cpu == -1) || (cpu == rq->cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) later_rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (later_rq->dl.dl_nr_running &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) !dl_time_before(task->dl.deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) later_rq->dl.earliest_dl.curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /*
			 * Target rq has tasks of equal or earlier deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * retrying does not release any lock and is unlikely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * to yield a different result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) later_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Retry if something changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (double_lock_balance(rq, later_rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (unlikely(task_rq(task) != rq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) task_running(rq, task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) !dl_task(task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) !task_on_rq_queued(task))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) double_unlock_balance(rq, later_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) later_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * If the rq we found has no -deadline task, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * its earliest one has a later deadline than our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * task, the rq is a good one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (!later_rq->dl.dl_nr_running ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) dl_time_before(task->dl.deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) later_rq->dl.earliest_dl.curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* Otherwise we try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) double_unlock_balance(rq, later_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) later_rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return later_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
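/*
 * The leftmost task in the pushable tree is the earliest-deadline task
 * we may push away; the BUG_ON()s below spell out the invariants it has
 * to satisfy (queued, not current, migratable, still -deadline).
 */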
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (!has_pushable_dl_tasks(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct task_struct, pushable_dl_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) BUG_ON(rq->cpu != task_cpu(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) BUG_ON(task_current(rq, p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) BUG_ON(p->nr_cpus_allowed <= 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) BUG_ON(!task_on_rq_queued(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) BUG_ON(!dl_task(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) /*
 * See if the non-running -deadline tasks on this rq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * can be sent to some other CPU where they can preempt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * and start executing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static int push_dl_task(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) struct task_struct *next_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) struct rq *later_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (!rq->dl.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) next_task = pick_next_pushable_dl_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (!next_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (WARN_ON(next_task == rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * If next_task preempts rq->curr, and rq->curr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * can move away, it makes sense to just reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * without going further in pushing next_task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (dl_task(rq->curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) rq->curr->nr_cpus_allowed > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* We might release rq lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) get_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /* Will lock the rq it'll find */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) later_rq = find_lock_later_rq(next_task, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (!later_rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * We must check all this again, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) * find_lock_later_rq releases rq->lock and it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * then possible that next_task has migrated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) task = pick_next_pushable_dl_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (task == next_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /*
			 * The task is still there. We don't try
			 * again; some other CPU will pull it when ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (!task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /* No more tasks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) put_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) next_task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) deactivate_task(rq, next_task, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) set_task_cpu(next_task, later_rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * Update the later_rq clock here, because the clock is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * by the cpufreq_update_util() inside __add_running_bw().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) update_rq_clock(later_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) resched_curr(later_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) double_unlock_balance(rq, later_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) put_task_struct(next_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static void push_dl_tasks(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /* push_dl_task() will return true if it moved a -deadline task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) while (push_dl_task(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static void pull_dl_task(struct rq *this_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) int this_cpu = this_rq->cpu, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) bool resched = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) struct rq *src_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) u64 dmin = LONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (likely(!dl_overloaded(this_rq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * Match the barrier from dl_set_overloaded; this guarantees that if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * see overloaded we must also see the dlo_mask bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) for_each_cpu(cpu, this_rq->rd->dlo_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (this_cpu == cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) src_rq = cpu_rq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) /*
		 * It looks racy, and it is! However, as in sched_rt.c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * we are fine with this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (this_rq->dl.dl_nr_running &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) dl_time_before(this_rq->dl.earliest_dl.curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) src_rq->dl.earliest_dl.next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /* Might drop this_rq->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) double_lock_balance(this_rq, src_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * If there are no more pullable tasks on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * rq, we're done with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (src_rq->dl.dl_nr_running <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /*
		 * We found a task to be pulled if:
		 *  - it preempts our current task (if there is one),
		 *  - it will preempt the last one we pulled (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (p && dl_time_before(p->dl.deadline, dmin) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) (!this_rq->dl.dl_nr_running ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) dl_time_before(p->dl.deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) this_rq->dl.earliest_dl.curr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) WARN_ON(p == src_rq->curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) WARN_ON(!task_on_rq_queued(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * Then we pull iff p has actually an earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * deadline than the current task of its runqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
			if (dl_time_before(src_rq->curr->dl.deadline,
					   p->dl.deadline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) resched = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) deactivate_task(src_rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) set_task_cpu(p, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) activate_task(this_rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) dmin = p->dl.deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) /* Is there any other task even earlier? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) double_unlock_balance(this_rq, src_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) if (resched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) resched_curr(this_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * Since the task is not running and a reschedule is not going to happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * anytime soon on its runqueue, we try pushing it away now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void task_woken_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (!task_running(rq, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) !test_tsk_need_resched(rq->curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) p->nr_cpus_allowed > 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) dl_task(rq->curr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) (rq->curr->nr_cpus_allowed < 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) push_dl_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) static void set_cpus_allowed_dl(struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) const struct cpumask *new_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) struct root_domain *src_rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) BUG_ON(!dl_task(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) rq = task_rq(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) src_rd = rq->rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * Migrating a SCHED_DEADLINE task between exclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * cpusets (different root_domains) entails a bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * update. We already made space for us in the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * domain (see cpuset_can_attach()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) if (!cpumask_intersects(src_rd->span, new_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct dl_bw *src_dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) src_dl_b = dl_bw_of(cpu_of(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * until we complete the update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) raw_spin_lock(&src_dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) raw_spin_unlock(&src_dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) set_cpus_allowed_common(p, new_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) /* Assumes rq->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) static void rq_online_dl(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (rq->dl.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) dl_set_overload(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (rq->dl.dl_nr_running > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /* Assumes rq->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) static void rq_offline_dl(struct rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (rq->dl.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) dl_clear_overload(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) cpudl_clear(&rq->rd->cpudl, rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) void __init init_sched_dl_class(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) GFP_KERNEL, cpu_to_node(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) void dl_add_task_root_domain(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) struct rq_flags rf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) struct rq *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) struct dl_bw *dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) if (!dl_task(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) rq = __task_rq_lock(p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) dl_b = &rq->rd->dl_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) raw_spin_lock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) raw_spin_unlock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) task_rq_unlock(rq, p, &rf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) void dl_clear_root_domain(struct root_domain *rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) rd->dl_bw.total_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static void switched_from_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) * task_non_contending() can start the "inactive timer" (if the 0-lag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * time is in the future). If the task switches back to dl before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * the "inactive timer" fires, it can continue to consume its current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * runtime using its current deadline. If it stays outside of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * will reset the task parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (task_on_rq_queued(p) && p->dl.dl_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) task_non_contending(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (!task_on_rq_queued(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) * Inactive timer is armed. However, p is leaving DEADLINE and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) * might migrate away from this rq while continuing to run in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * some other class. We need to remove its contribution from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * this rq's running_bw now, or sub_rq_bw (below) will complain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if (p->dl.dl_non_contending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) sub_running_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) sub_rq_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) * We cannot use inactive_task_timer() to invoke sub_running_bw()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) * at the 0-lag time, because the task could have been migrated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * while running as SCHED_OTHER in the meantime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (p->dl.dl_non_contending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) p->dl.dl_non_contending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * Since this might be the only -deadline task on the rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * this is the right place to try to pull some other one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * from an overloaded CPU, if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) deadline_queue_pull_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * When switching to -deadline, we may overload the rq, in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * case we try to push some other task off, if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) static void switched_to_dl(struct rq *rq, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) /* If p is not queued we will update its parameters at next wakeup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!task_on_rq_queued(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) add_rq_bw(&p->dl, &rq->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (rq->curr != p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) deadline_queue_push_tasks(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (dl_task(rq->curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) check_preempt_curr_dl(rq, p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) * If the scheduling parameters of a -deadline task changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * a push or pull operation might be needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static void prio_changed_dl(struct rq *rq, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int oldprio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (task_on_rq_queued(p) || rq->curr == p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) * This might be too much, but unfortunately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * we don't have the old deadline value, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * we can't tell whether the task is raising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * or lowering its prio, so...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (!rq->dl.overloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) deadline_queue_pull_task(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * If we now have an earlier deadline task than p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) * then reschedule, provided p is still on this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * runqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * Again, we don't know if p has an earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * or later deadline, so let's blindly set a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * (maybe not needed) rescheduling point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) resched_curr(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) const struct sched_class dl_sched_class
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) __section("__dl_sched_class") = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) .enqueue_task = enqueue_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) .dequeue_task = dequeue_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) .yield_task = yield_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) .check_preempt_curr = check_preempt_curr_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) .pick_next_task = pick_next_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) .put_prev_task = put_prev_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) .set_next_task = set_next_task_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) .balance = balance_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) .select_task_rq = select_task_rq_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) .migrate_task_rq = migrate_task_rq_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) .set_cpus_allowed = set_cpus_allowed_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) .rq_online = rq_online_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) .rq_offline = rq_offline_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) .task_woken = task_woken_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) .task_tick = task_tick_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) .task_fork = task_fork_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) .prio_changed = prio_changed_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) .switched_from = switched_from_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) .switched_to = switched_to_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) .update_curr = update_curr_dl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) int sched_dl_global_validate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) u64 runtime = global_rt_runtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) u64 period = global_rt_period();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) u64 new_bw = to_ratio(period, runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct dl_bw *dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) int cpu, cpus, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * Here we want to check that the bandwidth is not being set to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * value smaller than the bandwidth currently allocated in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * any of the root_domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * FIXME: Cycling over all the CPUs is overdoing it, but simpler than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * cycling on root_domains... Discussion on different/better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * solutions is welcome!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) dl_b = dl_bw_of(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) cpus = dl_bw_cpus(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) raw_spin_lock_irqsave(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (new_bw * cpus < dl_b->total_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) raw_spin_unlock_irqrestore(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
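
/*
 * To make the check above concrete, here is a minimal userspace sketch of
 * the same fixed-point arithmetic. It assumes to_ratio()'s definition of
 * (runtime << BW_SHIFT) / period with BW_SHIFT == 20; all figures are
 * illustrative only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20

/* Same fixed-point ratio as the kernel's to_ratio() (RUNTIME_INF aside). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* Default global limit: 950000us runtime every 1000000us period. */
	uint64_t new_bw = to_ratio(1000000, 950000);	/* ~0.95 * 2^20 */
	/* Two already-admitted tasks, each using 30% of a CPU. */
	uint64_t total_bw = 2 * to_ratio(100, 30);	/* ~0.60 * 2^20 */
	uint64_t cpus = 2;

	/* Mirrors: if (new_bw * cpus < dl_b->total_bw) ret = -EBUSY; */
	printf("new_bw * cpus = %llu, total_bw = %llu -> %s\n",
	       (unsigned long long)(new_bw * cpus),
	       (unsigned long long)total_bw,
	       new_bw * cpus < total_bw ? "-EBUSY" : "admitted");
	return 0;
}
#endif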
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (global_rt_runtime() == RUNTIME_INF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) dl_rq->bw_ratio = 1 << RATIO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) dl_rq->extra_bw = 1 << BW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) dl_rq->extra_bw = to_ratio(global_rt_period(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) global_rt_runtime());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
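
/*
 * Worked example (illustrative; assumes BW_SHIFT == 20, RATIO_SHIFT == 8):
 * with the default 950000us runtime every 1000000us period,
 *
 *   bw_ratio = to_ratio(950000, 1000000) >> (20 - 8)
 *            = ((1000000 << 20) / 950000) >> 12 ~= 1103764 >> 12 = 269,
 *
 * i.e. ~1.05 in 8-bit fixed point (the inverse of the 0.95 global limit),
 * while extra_bw = to_ratio(1000000, 950000) ~= 996147, i.e. ~0.95 in
 * 20-bit fixed point.
 */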
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) void sched_dl_do_global(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) u64 new_bw = -1; /* -1 == unrestricted bandwidth (RUNTIME_INF) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct dl_bw *dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) def_dl_bandwidth.dl_period = global_rt_period();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) def_dl_bandwidth.dl_runtime = global_rt_runtime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (global_rt_runtime() != RUNTIME_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) new_bw = to_ratio(global_rt_period(), global_rt_runtime());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) * FIXME: As above...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) dl_b = dl_bw_of(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) raw_spin_lock_irqsave(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) dl_b->bw = new_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) raw_spin_unlock_irqrestore(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * We must be sure that accepting a new task (or allowing the parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * of an existing one to change) is consistent with the bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) * constraints. If so, this function also updates the currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) * allocated bandwidth to reflect the new situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) * This function is called while holding p's rq->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) int sched_dl_overflow(struct task_struct *p, int policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) const struct sched_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) u64 period = attr->sched_period ?: attr->sched_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) u64 runtime = attr->sched_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) int cpus, err = -1, cpu = task_cpu(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct dl_bw *dl_b = dl_bw_of(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) unsigned long cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (attr->sched_flags & SCHED_FLAG_SUGOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /* !deadline task may carry old deadline bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * Whether a task enters, leaves, or stays -deadline while changing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * its parameters, we may need to update the total allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * bandwidth of the container accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) raw_spin_lock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) cpus = dl_bw_cpus(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) cap = dl_bw_capacity(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (dl_policy(policy) && !task_has_dl_policy(p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) !__dl_overflow(dl_b, cap, 0, new_bw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (hrtimer_active(&p->dl.inactive_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) __dl_sub(dl_b, p->dl.dl_bw, cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) __dl_add(dl_b, new_bw, cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) } else if (dl_policy(policy) && task_has_dl_policy(p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) * XXX this is slightly incorrect: when the task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) * utilization decreases, we should delay the total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) * utilization change until the task's 0-lag point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) * But this would require setting the task's "inactive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) * timer" while the task is not inactive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) __dl_sub(dl_b, p->dl.dl_bw, cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) __dl_add(dl_b, new_bw, cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) dl_change_utilization(p, new_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * Do not decrease the total deadline utilization here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * switched_from_dl() will take care to do it at the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * (0-lag) time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) raw_spin_unlock(&dl_b->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
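
/*
 * For example (illustrative figures): a task already admitted with
 * dl_bw ~= 0.2 * 2^20 that asks to grow to new_bw ~= 0.3 * 2^20 takes the
 * second branch above; if __dl_overflow() finds room for the extra
 * 0.1 * 2^20 of bandwidth, the old contribution is subtracted and the new
 * one added under dl_b->lock, so total_bw grows by exactly the difference.
 */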
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * This function initializes the sched_dl_entity of a task that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * becoming SCHED_DEADLINE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * Only the static values are considered here, the actual runtime and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * absolute deadline will be properly calculated when the task is enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * for the first time with its new policy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct sched_dl_entity *dl_se = &p->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) dl_se->dl_runtime = attr->sched_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) dl_se->dl_deadline = attr->sched_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
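
/*
 * For instance (illustrative figures, assuming to_ratio() scales by 2^20):
 * runtime = 10ms, deadline = 30ms, period = 100ms gives
 *
 *   dl_bw      = to_ratio(100ms, 10ms) ~= 0.10 * 2^20 = 104857,
 *   dl_density = to_ratio(30ms, 10ms)  ~= 0.33 * 2^20 = 349525,
 *
 * i.e. dl_bw is the runtime/period bandwidth, while dl_density is the
 * larger runtime/deadline ratio, relevant when deadline < period.
 */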
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct sched_dl_entity *dl_se = &p->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) attr->sched_priority = p->rt_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) attr->sched_runtime = dl_se->dl_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) attr->sched_deadline = dl_se->dl_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) attr->sched_period = dl_se->dl_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) attr->sched_flags &= ~SCHED_DL_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) attr->sched_flags |= dl_se->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) * Default limits for DL period; on the top end we guard against small util
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) * tasks still getting ridiculously long effective runtimes, on the bottom end we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) * guard against timer DoS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) * This function validates the new parameters of a -deadline task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) * We ask for the deadline to be non-zero and greater than or equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) * to the runtime, and for the period to be either zero or greater
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) * than or equal to the deadline. Furthermore, we have to be sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * user parameters are above the internal resolution of 1us (we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) * check sched_runtime only, since it is always the smallest one) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) * below 2^63 ns (we have to check both sched_deadline and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) * sched_period, as the latter can be zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) bool __checkparam_dl(const struct sched_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) u64 period, max, min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) /* special dl tasks don't actually use any parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (attr->sched_flags & SCHED_FLAG_SUGOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* deadline != 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (attr->sched_deadline == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * Since we truncate DL_SCALE bits, make sure we're at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * that big.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (attr->sched_runtime < (1ULL << DL_SCALE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) * Since we use the MSB for wrap-around and sign issues, make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) * sure it's not set (mind that period can be equal to zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (attr->sched_deadline & (1ULL << 63) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) attr->sched_period & (1ULL << 63))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) period = attr->sched_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) if (!period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) period = attr->sched_deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) /* runtime <= deadline <= period (if period != 0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (period < attr->sched_deadline ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) attr->sched_deadline < attr->sched_runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) if (period < min || period > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
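
/*
 * A minimal userspace sketch (illustrative only) of parameters that pass
 * the checks above: runtime <= deadline <= period, runtime of at least
 * 2^DL_SCALE ns, and period within the sysctl bounds. sched_setattr() has
 * no glibc wrapper, so the raw syscall is used (assuming the libc exposes
 * SYS_sched_setattr).
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>

struct sched_attr {
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;
	__u64 sched_deadline;
	__u64 sched_period;
};

#define SCHED_DEADLINE	6

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10ms  */
		.sched_deadline	= 30 * 1000 * 1000,	/* 30ms  */
		.sched_period	= 100 * 1000 * 1000,	/* 100ms */
	};

	/* pid 0 == calling thread; needs root/CAP_SYS_NICE to be admitted. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	puts("admitted as SCHED_DEADLINE");
	return 0;
}
#endif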
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) * This function clears the sched_dl_entity static params.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) void __dl_clear_params(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct sched_dl_entity *dl_se = &p->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) dl_se->dl_runtime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) dl_se->dl_deadline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) dl_se->dl_period = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) dl_se->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) dl_se->dl_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) dl_se->dl_density = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) dl_se->dl_throttled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) dl_se->dl_yielded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) dl_se->dl_non_contending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) dl_se->dl_overrun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) #ifdef CONFIG_RT_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) dl_se->pi_se = dl_se;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) struct sched_dl_entity *dl_se = &p->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (dl_se->dl_runtime != attr->sched_runtime ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) dl_se->dl_deadline != attr->sched_deadline ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) dl_se->dl_period != attr->sched_period ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) unsigned long flags, cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) unsigned int dest_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct dl_bw *dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) bool overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) dl_b = dl_bw_of(dest_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) raw_spin_lock_irqsave(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) cap = dl_bw_capacity(dest_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (overflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) * We reserve space for this task in the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) * root_domain, as we can't fail after this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) * We will free resources in the source root_domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) * later on (see set_cpus_allowed_dl()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) int cpus = dl_bw_cpus(dest_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) __dl_add(dl_b, p->dl.dl_bw, cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) raw_spin_unlock_irqrestore(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) const struct cpumask *trial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) int ret = 1, trial_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) struct dl_bw *cur_dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) cur_dl_b = dl_bw_of(cpumask_any(cur));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) trial_cpus = cpumask_weight(trial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (cur_dl_b->bw != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) bool dl_cpu_busy(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) unsigned long flags, cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) struct dl_bw *dl_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) bool overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) dl_b = dl_bw_of(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) raw_spin_lock_irqsave(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) cap = dl_bw_capacity(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) overflow = __dl_overflow(dl_b, cap, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) raw_spin_unlock_irqrestore(&dl_b->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) return overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) #ifdef CONFIG_SCHED_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) void print_dl_stats(struct seq_file *m, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) #endif /* CONFIG_SCHED_DEBUG */