^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * drivers/cpufreq/cpufreq_governor.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Header file for CPUFreq governors common code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2001 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #ifndef _CPUFREQ_GOVERNOR_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #define _CPUFREQ_GOVERNOR_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/irq_work.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/sched/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* Ondemand Sampling types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Abbreviations:
 * dbs: used as a shortform for demand based switching. It helps to keep
 * variable names smaller and simpler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * cdbs: common dbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * od_*: On-demand governor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * cs_*: Conservative governor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/*
 * Governor demand based switching data (per-policy or global).
 *
 * Shared between the ondemand and conservative governors; governor-specific
 * tunables hang off @tuners.
 */
struct dbs_data {
	struct gov_attr_set attr_set;	/* embedded sysfs attribute set */
	void *tuners;			/* od_dbs_tuners or cs_dbs_tuners (see gov_show_one) */
	unsigned int ignore_nice_load;	/* nonzero: nice time treated as idle - presumably; confirm in governor code */
	unsigned int sampling_rate;	/* sampling interval; presumably usec (cf. gov_update_sample_delay) */
	unsigned int sampling_down_factor; /* multiplier slowing down-switching - TODO confirm semantics */
	unsigned int up_threshold;	/* load threshold for scaling up - presumably percent */
	unsigned int io_is_busy;	/* nonzero: count iowait as busy time - confirm in governor code */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/*
 * to_dbs_data - get the struct dbs_data embedding @attr_set.
 *
 * Only valid for an attr_set that is the attr_set member of a dbs_data
 * (as set up by the common governor code).
 */
static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct dbs_data, attr_set);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/*
 * gov_show_one - define a sysfs show() for a governor-specific tunable.
 * @_gov: governor prefix (e.g. od or cs); selects struct _gov##_dbs_tuners,
 *	  which dbs_data->tuners is cast to.
 * @file_name: name of the tunable; must be an unsigned int member of the
 *	       tuners struct (printed with %u).
 */
#define gov_show_one(_gov, file_name)					\
static ssize_t show_##file_name						\
(struct gov_attr_set *attr_set, char *buf)				\
{									\
	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
	return sprintf(buf, "%u\n", tuners->file_name);			\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
/*
 * gov_show_one_common - define a sysfs show() for a common tunable.
 * @file_name: name of the tunable; must be an unsigned int member of
 *	       struct dbs_data itself (printed with %u).
 */
#define gov_show_one_common(file_name)					\
static ssize_t show_##file_name						\
(struct gov_attr_set *attr_set, char *buf)				\
{									\
	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
	return sprintf(buf, "%u\n", dbs_data->file_name);		\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
/* Define a read-only (0444) governor attribute; requires show_<name>. */
#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

/* Define a read-write (0644) governor attribute; requires both
 * show_<name> and store_<name>. */
#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/* Common to all CPUs of a policy */
struct policy_dbs_info {
	struct cpufreq_policy *policy;	/* back-pointer to the owning policy */
	/*
	 * Per policy mutex that serializes load evaluation from limit-change
	 * and work-handler.
	 */
	struct mutex update_mutex;

	u64 last_sample_time;	/* time of the last sample taken */
	s64 sample_delay_ns;	/* delay until the next sample, in ns
				 * (set via gov_update_sample_delay()) */
	atomic_t work_count;	/* coordinates irq_work/work scheduling -
				 * see cpufreq_governor.c for the protocol */
	struct irq_work irq_work;	/* presumably used to queue @work from
					 * a context that cannot sleep - confirm */
	struct work_struct work;	/* deferred load-evaluation work */
	/* dbs_data may be shared between multiple policy objects */
	struct dbs_data *dbs_data;
	struct list_head list;	/* node in the dbs_data policy list - presumably */
	/* Multiplier for increasing sample delay temporarily. */
	unsigned int rate_mult;
	unsigned int idle_periods;	/* For conservative */
	/* Status indicators */
	bool is_shared;		/* This object is used by multiple CPUs */
	bool work_in_progress;	/* Work is being queued up or in progress */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) unsigned int delay_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
/* Per cpu structures */
struct cpu_dbs_info {
	u64 prev_cpu_idle;	/* idle time as of the previous sample */
	u64 prev_update_time;	/* timestamp of the previous sample */
	u64 prev_cpu_nice;	/* nice time as of the previous sample
				 * (relevant when ignore_nice_load is set -
				 * presumably; confirm in governor code) */
	/*
	 * Used to keep track of load in the previous interval. However, when
	 * explicitly set to zero, it is used as a flag to ensure that we copy
	 * the previous load to the current interval only once, upon the first
	 * wake-up from idle.
	 */
	unsigned int prev_load;
	struct update_util_data update_util;	/* scheduler utilization hook */
	struct policy_dbs_info *policy_dbs;	/* owning per-policy data */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
/* Common Governor data across policies */
struct dbs_governor {
	struct cpufreq_governor gov;	/* the cpufreq governor interface */
	struct kobj_type kobj_type;	/* kobject type backing the sysfs tunables */

	/*
	 * Common data for platforms that don't set
	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
	 */
	struct dbs_data *gdbs_data;

	/* Per-sample hook; return value presumably is the next sampling
	 * delay in usec (cf. gov_update_sample_delay()) - confirm. */
	unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
	/* Allocate/free the governor-specific policy_dbs_info container. */
	struct policy_dbs_info *(*alloc)(void);
	void (*free)(struct policy_dbs_info *policy_dbs);
	/* Initialize/tear down governor-specific tunables in @dbs_data. */
	int (*init)(struct dbs_data *dbs_data);
	void (*exit)(struct dbs_data *dbs_data);
	/* Optional per-policy start hook invoked when the governor starts. */
	void (*start)(struct cpufreq_policy *policy);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
/*
 * dbs_governor_of - get the dbs_governor embedding @policy's governor.
 *
 * Only valid when the policy's governor is one built on this common code
 * (i.e. its cpufreq_governor is the gov member of a struct dbs_governor).
 */
static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
{
	return container_of(policy->governor, struct dbs_governor, gov);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
/*
 * Governor callback routines - common implementations plugged into
 * struct cpufreq_governor by CPUFREQ_DBS_GOVERNOR_INITIALIZER below.
 */
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/*
 * CPUFREQ_DBS_GOVERNOR_INITIALIZER - initializer for the gov member of a
 * struct dbs_governor, wiring in the common dbs callback routines above.
 * @_name_: governor name string exposed to cpufreq.
 */
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
	{								\
		.name = _name_,						\
		.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,			\
		.owner = THIS_MODULE,					\
		.init = cpufreq_dbs_governor_init,			\
		.exit = cpufreq_dbs_governor_exit,			\
		.start = cpufreq_dbs_governor_start,			\
		.stop = cpufreq_dbs_governor_stop,			\
		.limits = cpufreq_dbs_governor_limits,			\
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
/* Governor specific operations */
struct od_ops {
	/* Adjust @freq_next for powersave bias - presumably returns the
	 * biased target frequency; confirm in cpufreq_ondemand.c. */
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
/* Evaluate the current load for @policy; return value semantics are
 * defined by the common governor code - presumably a load estimate. */
unsigned int dbs_update(struct cpufreq_policy *policy);
/* Install a powersave-bias handler for the ondemand governor. */
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias);
/* Remove a previously registered powersave-bias handler. */
void od_unregister_powersave_bias_handler(void);
/* Common sysfs store() implementation for the sampling_rate tunable. */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count);
/* Refresh per-CPU sampling state after a tunables change. */
void gov_update_cpu_data(struct dbs_data *dbs_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #endif /* _CPUFREQ_GOVERNOR_H */