Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/cpufreq/cpufreq_conservative.c (all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
					 struct cpufreq_policy *policy)
{
	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_step == 0))
		freq_step = DEF_FREQUENCY_STEP;

	return freq_step;
}
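
/*
 * Worked example (illustrative numbers, not taken from this tree): with
 * policy->max = 1800000 kHz and freq_step = 5 (the default), each step is
 * (5 * 1800000) / 100 = 90000 kHz, so the governor moves in 90 MHz
 * increments. Only if policy->max were below 100 kHz would the computed
 * step round down to 0 and fall back to DEF_FREQUENCY_STEP.
 */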

/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (default); if so, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80% (default); if so, we try to decrease the frequency.
 *
 * Frequency updates happen in minimum steps of 5% (default) of the maximum
 * frequency.
 */
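/*
 * Worked example (hypothetical tunable values): with sampling_rate =
 * 10000 us, sampling_down_factor = 2 and the default thresholds, an
 * up-scaling decision is evaluated every 10 ms, while a down-scaling
 * decision is only evaluated every 2nd sample, i.e. every 20 ms, which
 * biases the governor toward staying at a higher frequency.
 */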
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	unsigned int requested_freq = dbs_info->requested_freq;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);
	unsigned int freq_step;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/*
	 * If requested_freq is out of range, it is likely that the limits
	 * changed in the meantime, so fall back to current frequency in that
	 * case.
	 */
	if (requested_freq > policy->max || requested_freq < policy->min) {
		requested_freq = policy->cur;
		dbs_info->requested_freq = requested_freq;
	}

	freq_step = get_freq_step(cs_tuners, policy);

	/*
	 * Decrease requested_freq one freq_step for each idle period that
	 * we didn't update the frequency.
	 */
	if (policy_dbs->idle_periods < UINT_MAX) {
		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;

		if (requested_freq > policy->min + freq_steps)
			requested_freq -= freq_steps;
		else
			requested_freq = policy->min;

		policy_dbs->idle_periods = UINT_MAX;
	}

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (requested_freq == policy->max)
			goto out;

		requested_freq += freq_step;
		if (requested_freq > policy->max)
			requested_freq = policy->max;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
		dbs_info->requested_freq = requested_freq;
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (requested_freq == policy->min)
			goto out;

		if (requested_freq > freq_step)
			requested_freq -= freq_step;
		else
			requested_freq = policy->min;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
		dbs_info->requested_freq = requested_freq;
	}

 out:
	return dbs_data->sampling_rate;
}

/************************** sysfs interface ************************/
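
/*
 * Usage sketch (paths assume a per-policy tunable layout; on systems where
 * the governor exposes a single global directory the tunables live under
 * /sys/devices/system/cpu/cpufreq/conservative/ instead):
 *
 *   echo conservative > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor
 *   echo 30 > /sys/devices/system/cpu/cpufreq/policy0/conservative/down_threshold
 *   echo 90 > /sys/devices/system/cpu/cpufreq/policy0/conservative/up_threshold
 *   echo 10 > /sys/devices/system/cpu/cpufreq/policy0/conservative/freq_step
 *
 * Each store_*() handler below parses one unsigned integer, range-checks it,
 * and returns -EINVAL on rejected input.
 */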

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 1 otherwise freq will not fall */
	if (ret != 1 || input < 1 || input > 100 ||
			input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/

static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	dbs_data->tuners = tuners;

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

static struct dbs_governor cs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_update = cs_dbs_update,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(cs_governor.gov)

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_CONSERVATIVE;
}
#endif
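
/*
 * Selection sketch (the symbols below are the standard cpufreq Kconfig
 * options and module parameter, not specific to this tree): build with
 * CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y and, to make it the boot-time default,
 * either set CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE=y or pass
 * cpufreq.default_governor=conservative on the kernel command line.
 */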

cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);