// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Move PELT related code from fair.c into this pelt.c file
 * Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

int pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
int sysctl_sched_pelt_period = PELT32_LOAD_AVG_PERIOD;
int pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
const u32 *pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;

int get_pelt_halflife(void)
{
        return pelt_load_avg_period;
}
EXPORT_SYMBOL_GPL(get_pelt_halflife);

static int __set_pelt_halflife(void *data)
{
        int rc = 0;
        int num = *(int *)data;

        switch (num) {
        case PELT8_LOAD_AVG_PERIOD:
                pelt_load_avg_period = PELT8_LOAD_AVG_PERIOD;
                pelt_load_avg_max = PELT8_LOAD_AVG_MAX;
                pelt_runnable_avg_yN_inv = pelt8_runnable_avg_yN_inv;
                pr_info("PELT half life is set to %dms\n", num);
                break;
        case PELT32_LOAD_AVG_PERIOD:
                pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
                pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
                pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
                pr_info("PELT half life is set to %dms\n", num);
                break;
        default:
                rc = -EINVAL;
                pr_err("Failed to set PELT half life to %dms, the current value is %dms\n",
                       num, pelt_load_avg_period);
        }

        sysctl_sched_pelt_period = pelt_load_avg_period;

        return rc;
}

int set_pelt_halflife(int num)
{
        return stop_machine(__set_pelt_halflife, &num, NULL);
}
EXPORT_SYMBOL_GPL(set_pelt_halflife);

int sched_pelt_period_update_handler(struct ctl_table *table, int write,
                                     void *buffer, size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        set_pelt_halflife(sysctl_sched_pelt_period);

        return 0;
}

static int __init set_pelt(char *str)
{
        int rc, num;

        rc = kstrtoint(str, 0, &num);
        if (rc) {
                pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
                return 0;
        }

        __set_pelt_halflife(&num);
        return rc;
}

early_param("pelt", set_pelt);
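
/*
 * Usage sketch (illustrative note, not upstream documentation): the PELT
 * half-life can be selected at boot with the "pelt=" command line parameter,
 * e.g. pelt=8 or pelt=32; anything other than PELT8_LOAD_AVG_PERIOD or
 * PELT32_LOAD_AVG_PERIOD is rejected with -EINVAL and the current value is
 * kept. It can also be changed at run time through the sysctl backed by
 * sysctl_sched_pelt_period, whose handler above funnels into
 * set_pelt_halflife(), so the period, the LOAD_AVG_MAX bound and the inverse
 * lookup table are all switched under stop_machine(), i.e. atomically with
 * respect to the other CPUs.
 */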

/*
 * Approximate:
 *   val * y^n, where y^LOAD_AVG_PERIOD ~= 0.5 (one half-life,
 *   ~1 scheduling period, 32ms by default)
 */
static u64 decay_load(u64 val, u64 n)
{
        unsigned int local_n;

        if (unlikely(n > LOAD_AVG_PERIOD * 63))
                return 0;

        /* after bounds checking we can collapse to 32-bit */
        local_n = n;

        /*
         * As y^PERIOD = 1/2, we can combine
         *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
         * with a look-up table that covers y^n (n < PERIOD), which keeps
         * decay_load() constant time.
         */
        if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
                val >>= local_n / LOAD_AVG_PERIOD;
                local_n %= LOAD_AVG_PERIOD;
        }

        val = mul_u64_u32_shr(val, pelt_runnable_avg_yN_inv[local_n], 32);
        return val;
}
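
/*
 * Worked example (illustration only, assuming the default 32ms half-life,
 * i.e. LOAD_AVG_PERIOD == 32 and LOAD_AVG_MAX == 47742):
 *
 *   decay_load(47742, 32) ~= 23871   - one full half-life, value is halved
 *   decay_load(47742, 64) ~= 11935   - two half-lives, value is quartered
 *   decay_load(47742, 16) ~= 33759   - 47742 * y^16, i.e. 47742 / sqrt(2)
 *
 * The first two reduce to the shift by local_n / LOAD_AVG_PERIOD (the final
 * multiply uses pelt_runnable_avg_yN_inv[0], which is ~1.0 in fixed point),
 * while the last one uses only the table lookup.
 */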

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
        u32 c1, c2, c3 = d3; /* y^0 == 1 */

        /*
         * c1 = d1 y^p
         */
        c1 = decay_load((u64)d1, periods);

        /*
         *            p-1
         * c2 = 1024 \Sum y^n
         *            n=1
         *
         *              inf        inf
         *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
         *              n=0        n=p
         */
        c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
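
        /*
         * Sanity illustration (assuming the default half-life): because
         * 1024 * \Sum_{n>=0} y^n == LOAD_AVG_MAX, the identity above gives,
         * for p == 1,
         *
         *   c2 = 47742 - decay_load(47742, 1) - 1024
         *      ~= 47742 - 46718 - 1024 == 0
         *
         * which matches the empty sum \Sum_{n=1}^{p-1} y^n for p == 1.
         */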

        return c1 + c2 + c3;
}

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *          d1          d2           d3
 *          ^           ^            ^
 *          |           |            |
 *        |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +                                 (Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0         (Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
               unsigned long load, unsigned long runnable, int running)
{
        u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
        u64 periods;

        delta += sa->period_contrib;
        periods = delta / 1024; /* A period is 1024us (~1ms) */

        /*
         * Step 1: decay old *_sum if we crossed period boundaries.
         */
        if (periods) {
                sa->load_sum = decay_load(sa->load_sum, periods);
                sa->runnable_sum =
                        decay_load(sa->runnable_sum, periods);
                sa->util_sum = decay_load((u64)(sa->util_sum), periods);

                /*
                 * Step 2
                 */
                delta %= 1024;
                if (load) {
                        /*
                         * This relies on the:
                         *
                         *   if (!load)
                         *      runnable = running = 0;
                         *
                         * clause from ___update_load_sum(); this results in
                         * the below usage of @contrib disappearing entirely,
                         * so no point in calculating it.
                         */
                        contrib = __accumulate_pelt_segments(periods,
                                        1024 - sa->period_contrib, delta);
                }
        }
        sa->period_contrib = delta;

        if (load)
                sa->load_sum += load * contrib;
        if (runnable)
                sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
        if (running)
                sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

        return periods;
}
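
/*
 * Worked example (illustration only): suppose sa->period_contrib == 200 and a
 * further delta of 2000 (us) arrives while the entity carries load. Then
 * delta becomes 2200, periods == 2, and the old *_sum values are decayed by
 * y^2. The fresh contribution is
 *
 *   contrib = __accumulate_pelt_segments(2, 1024 - 200, 2200 % 1024)
 *           = d1 y^2 + 1024 y + d3, with d1 == 824 and d3 == 152
 *
 * and sa->period_contrib is left at 152 for the next update.
 */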

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our coefficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
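 *
 * For example (an illustration of the series above, assuming the default
 * 32ms half-life): y = 0.5^(1/32) ~= 0.97857, so a segment that was fully
 * runnable 32 periods (~32ms) ago contributes about 1024 * y^32 ~= 512 to
 * the sum, whereas the current period can contribute up to 1024 * y^0 == 1024.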
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ...     [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
                   unsigned long load, unsigned long runnable, int running)
{
        u64 delta;

        delta = now - sa->last_update_time;
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
         */
        if ((s64)delta < 0) {
                sa->last_update_time = now;
                return 0;
        }

        /*
         * Use 1024ns as the unit of measurement since it's a reasonable
         * approximation of 1us and fast to compute.
         */
        delta >>= 10;
        if (!delta)
                return 0;

        sa->last_update_time += delta << 10;
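
        /*
         * Illustration (not additional semantics): a delta of 500ns yields
         * delta >> 10 == 0, so the update above bails out early; a delta of
         * 2500ns yields delta == 2 (~2us), last_update_time advances by
         * 2048ns and the remaining ~452ns are implicitly carried over to the
         * next update.
         */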

        /*
         * running is a subset of runnable (weight) so running can't be set if
         * runnable is clear. But there are some corner cases where the current
         * se has been already dequeued but cfs_rq->curr still points to it.
         * This means that weight will be 0 while running is still set, both
         * for a sched_entity and for a cfs_rq if the latter becomes idle.
         * As an example, this happens during idle_balance() which calls
         * update_blocked_averages().
         *
         * Also see the comment in accumulate_sum().
         */
        if (!load)
                runnable = running = 0;

        /*
         * Now we know we crossed measurement unit boundaries. The *_avg
         * accrues by two steps:
         *
         * Step 1: accumulate *_sum since last_update_time. If we haven't
         * crossed period boundaries, finish.
         */
        if (!accumulate_sum(delta, sa, load, runnable, running))
                return 0;

        return 1;
}

/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment otherwise the remaining part of the segment
 * will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the position in the time segment and is
 * equal to:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
        u32 divider = get_pelt_divider(sa);

        /*
         * Step 2: update *_avg.
         */
        sa->load_avg = div_u64(load * sa->load_sum, divider);
        sa->runnable_avg = div_u64(sa->runnable_sum, divider);
        WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
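
/*
 * Illustration (no additional semantics implied): with the default 32ms
 * half-life the divider is LOAD_AVG_MAX - 1024 + sa->period_contrib, i.e.
 * somewhere in [46718..47742). For an entity that is always running,
 * util_sum saturates at roughly divider << SCHED_CAPACITY_SHIFT, so
 * util_avg = util_sum / divider converges to ~1024 == SCHED_CAPACITY_SCALE,
 * as expected for a fully busy CPU.
 */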

/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_running
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */
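
/*
 * Example (illustration of the bookkeeping above): a single nice-0 task that
 * is permanently runnable on an otherwise idle CPU accumulates
 * se->avg.load_sum ~= divider, so load_avg ~= se_weight(se), i.e.
 * scale_load_down(NICE_0_LOAD), while its runnable_avg and util_avg both
 * converge towards SCHED_CAPACITY_SCALE. The cfs_rq it is attached to sees
 * the same values through the \Sum relations above.
 */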

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
        if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
                ___update_load_avg(&se->avg, se_weight(se));
                trace_pelt_se_tp(se);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
                               cfs_rq->curr == se)) {

                ___update_load_avg(&se->avg, se_weight(se));
                cfs_se_util_change(&se->avg);
                trace_pelt_se_tp(se);
                return 1;
        }

        return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
        if (___update_load_sum(now, &cfs_rq->avg,
                               scale_load_down(cfs_rq->load.weight),
                               cfs_rq->h_nr_running,
                               cfs_rq->curr != NULL)) {

                ___update_load_avg(&cfs_rq->avg, 1);
                trace_pelt_cfs_tp(cfs_rq);
                return 1;
        }

        return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        if (___update_load_sum(now, &rq->avg_rt,
                               running,
                               running,
                               running)) {

                ___update_load_avg(&rq->avg_rt, 1);
                trace_pelt_rt_tp(rq);
                return 1;
        }

        return 0;
}
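
/*
 * Illustration: the callers drive this with running == 1 while the CPU is
 * executing an RT task and running == 0 otherwise, so rq->avg_rt.util_avg
 * approaches SCHED_CAPACITY_SCALE on a CPU fully occupied by RT work and
 * decays back towards 0 once the RT activity stops. (How the callers pick
 * the running argument lives outside this file; this note only interprets
 * the resulting signal.)
 */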

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        if (___update_load_sum(now, &rq->avg_dl,
                               running,
                               running,
                               running)) {

                ___update_load_avg(&rq->avg_dl, 1);
                trace_pelt_dl_tp(rq);
                return 1;
        }

        return 0;
}

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
 * thermal:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a cpu
 * running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time-weighted "delta" capacity, unlike util_avg which is binary.
 *
 *   "delta capacity" = actual capacity -
 *                      capped capacity of a cpu due to a thermal event.
 */

int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
        if (___update_load_sum(now, &rq->avg_thermal,
                               capacity,
                               capacity,
                               capacity)) {
                ___update_load_avg(&rq->avg_thermal, 1);
                trace_pelt_thermal_tp(rq);
                return 1;
        }

        return 0;
}
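
/*
 * Illustration (interpreting the comment above, not a statement about the
 * callers): if a CPU whose original capacity is 1024 is currently capped to
 * 800 by a thermal event, the "delta capacity" passed in is 224, and
 * rq->avg_thermal.load_avg converges towards 224 for as long as the cap is
 * in place, then decays back towards 0 once the cap is lifted.
 */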
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
        int ret = 0;

        /*
         * We can't use clock_pelt because irq time is not accounted in
         * clock_task. Instead we directly scale the running time to
         * reflect the real amount of computation.
         */
        running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
        running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

        /*
         * We know the time that has been used by interrupts since the last
         * update, but we don't know when. Let's be pessimistic and assume
         * that the interrupts happened just before this update. This is not
         * far from reality because an interrupt will most probably wake up a
         * task and trigger an update of the rq clock, during which the
         * metric is updated.
         * We start to decay with the normal context time and then we add the
         * interrupt context time.
         * We can safely remove running from rq->clock because
         * rq->clock += delta with delta >= running
         */
        ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
                                 0,
                                 0,
                                 0);
        ret += ___update_load_sum(rq->clock, &rq->avg_irq,
                                  1,
                                  1,
                                  1);

        if (ret) {
                ___update_load_avg(&rq->avg_irq, 1);
                trace_pelt_irq_tp(rq);
        }

        return ret;
}
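
/*
 * Illustration of the two-phase update above: if 100us elapsed since the last
 * update of rq->avg_irq and 30us of that was spent in interrupts, the first
 * ___update_load_sum() call decays the signal over the first ~70us with zero
 * contribution (idle from the irq signal's point of view), and the second
 * call accounts the remaining ~30us as fully busy, matching the "interrupts
 * happened just before the update" assumption.
 */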
#endif