/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
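
/*
 * Illustrative sketch, simplified from place_entity() in fair.c: the
 * sleeper credit is halved when this feature is set.
 *
 *	unsigned long thresh = sysctl_sched_latency;
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *	vruntime -= thresh;
 */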

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)
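
/*
 * Illustrative sketch, simplified from place_entity() in fair.c: a new
 * task is debited one vslice so it starts behind already queued tasks.
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);
 */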

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)
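
/*
 * Illustrative sketch, simplified from check_preempt_wakeup() in fair.c:
 * the wakee is recorded as the "next" buddy so that pick_next_entity()
 * can prefer it even when the wakeup did not preempt.
 *
 *	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
 *		set_next_buddy(pse);
 */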

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
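
/*
 * Illustrative sketch, simplified from fair.c: the preempted task is
 * recorded as the "last" buddy, and pick_next_entity() picks it again
 * as long as that does not create too much unfairness.
 *
 *	if (sched_feat(LAST_BUDDY) && entity_is_task(se))
 *		set_last_buddy(se);
 *
 *	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
 *		se = cfs_rq->last;
 */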

/*
 * Consider buddies to be cache hot. This decreases the likelihood
 * of a cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
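
/*
 * Illustrative sketch, simplified from task_hot() in fair.c: buddies are
 * reported as cache hot so the load balancer is reluctant to move them.
 *
 *	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
 *	    (&p->se == cfs_rq_of(&p->se)->next ||
 *	     &p->se == cfs_rq_of(&p->se)->last))
 *		return 1;
 */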

/*
 * Allow wakeup-time preemption of the current task.
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
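
/*
 * Illustrative sketch, from check_preempt_wakeup() in fair.c: with the
 * feature disabled, a wakeup never preempts the running task.
 *
 *	if (!sched_feat(WAKEUP_PREEMPTION))
 *		return;
 */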

/* Drive slice-expiry preemption from an hrtimer instead of the regular tick. */
SCHED_FEAT(HRTICK, false)
/* Let the regular tick keep doing preemption checks while the hrtick runs. */
SCHED_FEAT(DOUBLE_TICK, false)

/*
 * Decrement CPU capacity based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_CAPACITY, true)
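
/*
 * Illustrative sketch, simplified from scale_rt_capacity() in fair.c:
 * the capacity left for CFS is what remains after RT, deadline and IRQ
 * pressure are subtracted.
 *
 *	used = rq->avg_rt.util_avg + rq->avg_dl.util_avg;
 *	free = arch_scale_cpu_capacity(cpu) - used;
 *	return scale_irq_capacity(free, irq, max);
 */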

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
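
/*
 * Illustrative sketch, simplified from ttwu_queue() in core.c: rather
 * than taking a remote rq->lock, the wakeup is queued and completed by
 * the target CPU from the scheduler IPI.
 *
 *	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(this_cpu, cpu)) {
 *		__ttwu_queue_wakelist(p, cpu, wake_flags);
 *		return;
 *	}
 */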

/*
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 *
 * SIS_AVG_CPU: bail out of select_idle_cpu() when the scan is likely to
 * cost more than the CPU's average idle time.
 * SIS_PROP: scale the number of CPUs scanned in proportion to the
 * estimated idleness of the domain.
 */
SCHED_FEAT(SIS_AVG_CPU, false)
SCHED_FEAT(SIS_PROP, true)
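
/*
 * Illustrative sketch, simplified from select_idle_cpu() in fair.c: under
 * SIS_PROP the scan depth is proportional to the average idle time divided
 * by the average scan cost, with a small floor.
 *
 *	span_avg = sd->span_weight * avg_idle;
 *	if (span_avg > 4 * avg_cost)
 *		nr = div_u64(span_avg, avg_cost);
 *	else
 *		nr = 4;
 */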

/*
 * Issue a WARN when we do multiple update_rq_clock() calls
 * in a single rq->lock section. Default disabled because the
 * annotations are not complete.
 */
SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
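
/*
 * Illustrative sketch, from update_rq_clock() in core.c: RQCF_UPDATED is
 * set on the first update and any repeat in the same lock section warns.
 *
 *	if (sched_feat(WARN_DOUBLE_CLOCK))
 *		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
 *	rq->clock_update_flags |= RQCF_UPDATED;
 */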

#ifdef HAVE_RT_PUSH_IPI
/*
 * When many CPUs lower their priority at the same time and a single
 * CPU has a runnable RT task that can migrate, they would all try to
 * take that CPU's rq lock, possibly creating large contention
 * (a thundering herd). Instead, send an IPI to that CPU and let it
 * push the RT task to where it should go.
 */
SCHED_FEAT(RT_PUSH_IPI, true)
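
/*
 * Illustrative sketch, simplified from tell_cpu_to_push() in rt.c: the
 * CPU that lowered its priority queues irq_work on the overloaded CPU,
 * which then pushes its own RT tasks.
 *
 *	cpu = rto_next_cpu(rq->rd);
 *	if (cpu >= 0)
 *		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
 */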
#endif

/* Allow a CPU to borrow unused RT runtime from other CPUs. */
SCHED_FEAT(RT_RUNTIME_SHARE, false)
/* Skip very low-load tasks on the first load-balancing passes. */
SCHED_FEAT(LB_MIN, false)
/* Age (decay) a task's stale load average before attaching it to a cfs_rq. */
SCHED_FEAT(ATTACH_AGE_LOAD, true)

/*
 * Heuristics used by wake_affine() to decide whether to pull the wakee
 * towards the waker's CPU:
 *
 * WA_IDLE: consider the waker's CPU if it is cache-affine and idle.
 * WA_WEIGHT: compare the load of the waker's and the wakee's previous CPU.
 * WA_BIAS: bias the weight comparison in favour of the waker's side.
 */
SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)
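
/*
 * Illustrative sketch, simplified from wake_affine() in fair.c:
 *
 *	if (sched_feat(WA_IDLE))
 *		target = wake_affine_idle(this_cpu, prev_cpu, sync);
 *
 *	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
 *		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 */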

/*
 * UtilEstimation. Use an EWMA-based estimate of CPU utilization, which
 * is more stable than the instantaneous utilization of tasks that sleep
 * often. UTIL_EST_FASTUP: when the current utilization is above the
 * estimate, adopt it immediately instead of ramping the EWMA up slowly.
 */
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(UTIL_EST_FASTUP, true)
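
/*
 * Illustrative sketch, simplified from util_est_update() in fair.c: the
 * estimate jumps straight up but ramps down via a slow-moving average.
 *
 *	if (sched_feat(UTIL_EST_FASTUP) && ue.ewma < ue.enqueued)
 *		ue.ewma = ue.enqueued;
 *	else
 *		ue.ewma += (ue.enqueued - ue.ewma) >> UTIL_EST_WEIGHT_SHIFT;
 */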

/*
 * ALT_PERIOD: base the scheduling period on the number of runnable
 * tasks of the whole runqueue instead of the local cfs_rq.
 * BASE_SLICE: never hand out a slice shorter than the minimum
 * granularity.
 */
SCHED_FEAT(ALT_PERIOD, true)
SCHED_FEAT(BASE_SLICE, true)
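
/*
 * Illustrative sketch, simplified from sched_slice() in fair.c:
 *
 *	if (sched_feat(ALT_PERIOD))
 *		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
 *
 *	slice = __sched_period(nr_running + !se->on_rq);
 *	...
 *	if (sched_feat(BASE_SLICE))
 *		slice = max(slice, (u64)sysctl_sched_min_granularity);
 */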