// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
#include <trace/hooks/sched.h>

#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
#ifdef CONFIG_ARCH_ROCKCHIP
	unsigned int target_load;
#endif
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock; /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool limits_changed;
	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	u64 last_update;

	unsigned long bw_dl;
	unsigned long max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not work on the fast switching
	 * platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (!sg_policy->need_freq_update) {
		if (sg_policy->next_freq == next_freq)
			return false;
	} else {
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	}

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	if (sugov_update_next_freq(sg_policy, time, next_freq))
		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
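 *
 * As an illustration (made-up numbers, not from any specific platform): with
 * util = 614, max = 1024 and max_freq = 2000000 kHz on a frequency-invariant
 * system, the raw value is 1.25 * 2000000 * 614 / 1024 ~= 1499023 kHz, which
 * is then resolved to the nearest driver-supported frequency at or above it.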
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;
	unsigned long next_freq = 0;

	trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy,
				       &sg_policy->need_freq_update);
	if (next_freq)
		freq = next_freq;
	else
#ifdef CONFIG_ARCH_ROCKCHIP
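		/*
		 * Rockchip-specific scaling (note added for clarity): with the
		 * default target_load of 80, 100 / 80 = 1.25, which matches the
		 * C = 1.25 tipping point described above; a larger target_load
		 * yields a lower frequency for the same utilization.
		 */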
		freq = div64_ul((u64)(100 * freq / sg_policy->tunables->target_load) * util, max);
#else
		freq = map_util_freq(util, freq, max);
#endif

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilization are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal-time. These latter
 * are then accrued in the irq utilization.
 *
 * The DL bandwidth number, OTOH, is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
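	 *
	 * For example (illustrative numbers only): with irq = 128, max = 1024
	 * and U = 512, U' = 128 + (896 / 1024) * 512 = 128 + 448 = 576.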
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
EXPORT_SYMBOL_GPL(schedutil_cpu_util);

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick has elapsed since the
 * last update of a CPU. If a new IO wait boost is requested after more than a
 * tick, then we enable the boost starting from IOWAIT_BOOST_MIN, which
 * improves energy efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
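 *
 * For instance, with SCHED_CAPACITY_SCALE = 1024 the boost starts at
 * IOWAIT_BOOST_MIN = 128 and successive in-tick IO wakeups walk it through
 * 128, 256, 512 and finally 1024 (the capacity of the maximum OPP).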
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative on tasks that do only sporadic IO operations.
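 *
 * In practice the decay halves the boost on each update in which no new IO
 * wakeup was reported, e.g. 1024 -> 512 -> 256 -> 128 -> 0 (values below
 * IOWAIT_BOOST_MIN are dropped to zero).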
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return util;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return util;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return util;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->limits_changed = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	unsigned int cached_freq = sg_policy->cached_raw_freq;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

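		/*
		 * Note added for clarity: this compares j_util / j_max against
		 * util / max by cross-multiplying, so the CPU with the highest
		 * relative utilization is kept without doing a division.
		 */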
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we may miss queueing the new update.
	 *
	 * Note: If a work was queued after the update_lock is released,
	 * sugov_work() will just be called again by kthread_work code; and the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

#ifdef CONFIG_ARCH_ROCKCHIP
static ssize_t target_load_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->target_load);
}

static ssize_t
target_load_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int target_load;

	if (kstrtouint(buf, 10, &target_load))
		return -EINVAL;

	if (!target_load || (target_load > 100))
		return -EINVAL;

	tunables->target_load = target_load;

	return count;
}

static struct governor_attr target_load = __ATTR_RW(target_load);
#endif

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
#ifdef CONFIG_ARCH_ROCKCHIP
	&target_load.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);

	kfree(to_sugov_tunables(attr_set));
}

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
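		 *
		 * (The values below are in nanoseconds: 1 ms of runtime every
		 * 10 ms period.)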
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) .sched_runtime = 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) .sched_deadline = 10000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) .sched_period = 10000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct cpufreq_policy *policy = sg_policy->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /* kthread only required for slow path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (policy->fast_switch_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) kthread_init_work(&sg_policy->work, sugov_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) kthread_init_worker(&sg_policy->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) "sugov:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) cpumask_first(policy->related_cpus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (IS_ERR(thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return PTR_ERR(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ret = sched_setattr_nocheck(thread, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) kthread_stop(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) sg_policy->thread = thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) kthread_bind_mask(thread, policy->related_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) init_irq_work(&sg_policy->irq_work, sugov_irq_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) mutex_init(&sg_policy->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) wake_up_process(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static void sugov_kthread_stop(struct sugov_policy *sg_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* kthread only required for slow path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (sg_policy->policy->fast_switch_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) kthread_flush_worker(&sg_policy->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) kthread_stop(sg_policy->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) mutex_destroy(&sg_policy->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct sugov_tunables *tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (tunables) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!have_governor_per_policy())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) global_tunables = tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static void sugov_clear_global_tunables(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (!have_governor_per_policy())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) global_tunables = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
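/*
 * Governor lifecycle.  sugov_init() runs when schedutil is attached to a
 * policy: enable fast switching if the driver supports it, allocate the
 * per-policy state, create the slow-path kthread where needed, then attach
 * to existing tunables or allocate and register new ones.
 */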
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) static int sugov_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct sugov_policy *sg_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct sugov_tunables *tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /* State should be equivalent to EXIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (policy->governor_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) cpufreq_enable_fast_switch(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) sg_policy = sugov_policy_alloc(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!sg_policy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) goto disable_fast_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ret = sugov_kthread_create(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto free_sg_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) mutex_lock(&global_tunables_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
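	/*
	 * If another policy already created the shared tunables (system-wide
	 * governor case), take a reference and hook this policy onto the
	 * existing attr_set instead of allocating a second instance.
	 */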
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (global_tunables) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (WARN_ON(have_governor_per_policy())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) goto stop_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) policy->governor_data = sg_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) sg_policy->tunables = global_tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) tunables = sugov_tunables_alloc(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (!tunables) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) goto stop_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) #ifdef CONFIG_ARCH_ROCKCHIP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) tunables->target_load = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) #endif
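	/*
	 * Defaults: rate_limit_us comes from the driver's reported transition
	 * delay and becomes freq_update_delay_ns in sugov_start().  On Rockchip
	 * the extra target_load tunable starts at 80, i.e. (assuming the usual
	 * target-load scaling in this variant's frequency selection path)
	 * frequencies are picked so utilization hovers around 80%.  Both can be
	 * changed later via the governor's sysfs attributes, typically under
	 * .../cpufreq/policy<N>/schedutil/ (or a single .../cpufreq/schedutil/
	 * directory when the governor is system-wide).
	 */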
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) policy->governor_data = sg_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) sg_policy->tunables = tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) get_governor_parent_kobj(policy), "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) schedutil_gov.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) mutex_unlock(&global_tunables_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
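	/*
	 * Error unwinding: the labels below undo the init steps in reverse
	 * order, and execution falls through from 'fail' all the way down to
	 * 'disable_fast_switch'.
	 */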
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) kobject_put(&tunables->attr_set.kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) policy->governor_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) sugov_clear_global_tunables();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) stop_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) sugov_kthread_stop(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) mutex_unlock(&global_tunables_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) free_sg_policy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) sugov_policy_free(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) disable_fast_switch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cpufreq_disable_fast_switch(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pr_err("initialization failed (error %d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
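/*
 * sugov_exit() detaches the policy: drop the tunables reference (the last
 * user also clears global_tunables, and the kobject release then frees the
 * object), stop the slow-path kthread and disable fast switching again.
 */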
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static void sugov_exit(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct sugov_policy *sg_policy = policy->governor_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct sugov_tunables *tunables = sg_policy->tunables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) mutex_lock(&global_tunables_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) policy->governor_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sugov_clear_global_tunables();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) mutex_unlock(&global_tunables_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) sugov_kthread_stop(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sugov_policy_free(sg_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) cpufreq_disable_fast_switch(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
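/*
 * sugov_start() arms the governor: reset the pacing and bookkeeping state,
 * then register a per-CPU update_util hook so scheduler events (enqueue,
 * tick, ...) call back into sugov_update_shared()/sugov_update_single().
 */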
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int sugov_start(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct sugov_policy *sg_policy = policy->governor_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) sg_policy->last_freq_update_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sg_policy->next_freq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) sg_policy->work_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) sg_policy->limits_changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) sg_policy->cached_raw_freq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) for_each_cpu(cpu, policy->cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) memset(sg_cpu, 0, sizeof(*sg_cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) sg_cpu->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) sg_cpu->sg_policy = sg_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
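	/*
	 * Install the hooks in a second pass, only after every sg_cpu above is
	 * fully initialized: with a shared policy a callback running on one CPU
	 * may inspect its siblings' state.
	 */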
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for_each_cpu(cpu, policy->cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) policy_is_shared(policy) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) sugov_update_shared :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) sugov_update_single);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static void sugov_stop(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct sugov_policy *sg_policy = policy->governor_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) for_each_cpu(cpu, policy->cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cpufreq_remove_update_util_hook(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
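	/*
	 * The update_util hooks are dereferenced under RCU from the scheduler
	 * fast path; wait for a grace period so no CPU can still be inside a
	 * callback before cancelling the irq_work / kthread work below.
	 */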
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (!policy->fast_switch_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) irq_work_sync(&sg_policy->irq_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kthread_cancel_work_sync(&sg_policy->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
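/*
 * sugov_limits() runs when policy->min/max change (sysfs, thermal, ...).
 * Without fast switching the new limits are applied immediately under
 * work_lock; in both cases limits_changed makes the next scheduler callback
 * re-evaluate the frequency instead of being rate limited.
 */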
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static void sugov_limits(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct sugov_policy *sg_policy = policy->governor_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!policy->fast_switch_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) mutex_lock(&sg_policy->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) cpufreq_policy_apply_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) mutex_unlock(&sg_policy->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) sg_policy->limits_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
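/*
 * Governor descriptor.  CPUFREQ_GOV_DYNAMIC_SWITCHING marks schedutil as a
 * governor that switches frequencies on its own, so the cpufreq core refuses
 * it on drivers whose transition latency is too high for that.
 */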
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct cpufreq_governor schedutil_gov = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .name = "schedutil",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .init = sugov_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) .exit = sugov_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .start = sugov_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .stop = sugov_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .limits = sugov_limits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct cpufreq_governor *cpufreq_default_governor(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return &schedutil_gov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
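/*
 * cpufreq_governor_init() expands to a core_initcall() registering schedutil
 * with the cpufreq core early in boot, so it can act as the default governor
 * selected above.
 */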
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) cpufreq_governor_init(schedutil_gov);