Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#ifdef CONFIG_ARCH_ROCKCHIP
#include <linux/input.h>
#endif
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/clock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_wo(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0200, NULL, store_##_name)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)
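
/*
 * Illustration (comment added for readability, not in the original source):
 * gov_attr_rw(hispeed_freq) expands to
 *
 *   static struct governor_attr hispeed_freq =
 *   __ATTR(hispeed_freq, 0644, show_hispeed_freq, store_hispeed_freq);
 *
 * i.e. each tunable declared with these macros gets a sysfs attribute wired
 * to show_/store_ handlers of the same name, defined later in this file.
 */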

/* Separate instance required for each 'interactive' directory in sysfs */
struct interactive_tunables {
	struct gov_attr_set attr_set;

	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;

	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;

	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;

	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;

	/* The sample rate of the timer used to increase frequency */
	unsigned long sampling_rate;

	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;

	/* Non-zero means indefinite speed boost active */
	int boost;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
#ifdef CONFIG_ARCH_ROCKCHIP
	/* Frequency to which a touch boost takes the cpus */
	unsigned long touchboost_freq;
	/* Duration of a touchboost pulse in usecs */
	int touchboostpulse_duration_val;
	/* End time of touchboost pulse in ktime converted to usecs */
	u64 touchboostpulse_endtime;
#endif
	bool boosted;

	/*
	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
	unsigned long timer_slack_delay;
	unsigned long timer_slack;
	bool io_is_busy;
};
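
/*
 * Note on units (comment added for readability): sampling_rate,
 * min_sample_time and timer_slack are kept in microseconds, matching the
 * sysfs interface, while timer_slack_delay is the precomputed jiffies value
 * used to arm the slack timer; update_slack_delay() below keeps it equal to
 * usecs_to_jiffies(timer_slack + sampling_rate).
 */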

/* Separate instance required for each 'struct cpufreq_policy' */
struct interactive_policy {
	struct cpufreq_policy *policy;
	struct interactive_tunables *tunables;
	struct list_head tunables_hook;
};

/* Separate instance required for each CPU */
struct interactive_cpu {
	struct update_util_data update_util;
	struct interactive_policy *ipolicy;

	struct irq_work irq_work;
	u64 last_sample_time;
	unsigned long next_sample_jiffies;
	bool work_in_progress;

	struct rw_semaphore enable_sem;
	struct timer_list slack_timer;

	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;

	spinlock_t target_freq_lock; /* protects target freq */
	unsigned int target_freq;

	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	int cpu;
};

static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);

/* Realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY
};
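
/*
 * Summary of the defaults above (comment added for readability): the
 * governor samples load every 20 ms (DEFAULT_SAMPLING_RATE), jumps straight
 * to hispeed_freq at 99% load (DEFAULT_GO_HISPEED_LOAD), otherwise picks
 * the frequency that would yield 90% load (DEFAULT_TARGET_LOAD), holds a
 * chosen speed for at least 80 ms before ramping down
 * (DEFAULT_MIN_SAMPLE_TIME), and allows up to 80 ms of extra idle slack
 * (DEFAULT_TIMER_SLACK = 4 * 20 ms) before the slack timer fires.
 */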

/* Iterate over interactive policies for tunables */
#define for_each_ipolicy(__ip)	\
	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)

static struct interactive_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);
#ifdef CONFIG_ARCH_ROCKCHIP
static struct interactive_tunables backup_tunables[2];
#endif

static inline void update_slack_delay(struct interactive_tunables *tunables)
{
	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
						       tunables->sampling_rate);
}

static bool timer_slack_required(struct interactive_cpu *icpu)
{
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;

	if (tunables->timer_slack == 0)
		return false;

	if (icpu->target_freq > ipolicy->policy->min)
		return true;

	return false;
}

static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
	add_timer_on(&icpu->slack_timer, cpu);
}

static void gov_slack_timer_modify(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
}

static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
				bool modify)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	unsigned long flags;

	spin_lock_irqsave(&icpu->load_lock, flags);

	icpu->time_in_idle = get_cpu_idle_time(cpu,
					       &icpu->time_in_idle_timestamp,
					       tunables->io_is_busy);
	icpu->cputime_speedadj = 0;
	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;

	if (timer_slack_required(icpu)) {
		if (modify)
			gov_slack_timer_modify(icpu);
		else
			gov_slack_timer_start(icpu, cpu);
	}

	spin_unlock_irqrestore(&icpu->load_lock, flags);
}

static unsigned int
freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
			    unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}
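
/*
 * Comment added for readability: above_hispeed_delay and target_loads are
 * both flat arrays in which even indexes hold values and odd indexes hold
 * ascending frequency boundaries. An illustrative sysfs string
 * "20000 1200000:80000" parses to {20000, 1200000, 80000}: wait 20000 us
 * below 1.2 GHz, 80000 us at or above it. The scan above steps i by 2
 * across the value slots and stops at the first boundary exceeding 'freq'.
 */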

static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
				       unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
	     freq >= tunables->target_loads[i + 1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct interactive_cpu *icpu,
				unsigned int loadadjfreq)
{
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
	unsigned int freq = policy->cur;
	int index;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
						       CPUFREQ_RELATION_L);

		freq = freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low */
			freqmin = prevfreq;

			if (freq < freqmax)
				continue;

			/* Find highest frequency that is less than freqmax */
			index = cpufreq_frequency_table_target(policy,
					freqmax - 1, CPUFREQ_RELATION_H);

			freq = freq_table[index].frequency;

			if (freq == freqmin) {
				/*
				 * The first frequency below freqmax has already
				 * been found to be too low. freqmax is the
				 * lowest speed we found that is fast enough.
				 */
				freq = freqmax;
				break;
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq > freqmin)
				continue;

			/* Find lowest frequency that is higher than freqmin */
			index = cpufreq_frequency_table_target(policy,
					freqmin + 1, CPUFREQ_RELATION_L);

			freq = freq_table[index].frequency;

			/*
			 * If freqmax is the first frequency above
			 * freqmin then we have already found that
			 * this speed is fast enough.
			 */
			if (freq == freqmax)
				break;
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
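
/*
 * Worked example with illustrative numbers (comment added for readability):
 * with policy->cur = 1008000 kHz at 60% load, loadadjfreq = 60 * 1008000 =
 * 60480000. For a target load of 90 the loop asks for the lowest table
 * frequency at or above 60480000 / 90 = 672000 kHz, then re-reads the
 * target load at that frequency and narrows the freqmin/freqmax bracket
 * until the choice is stable.
 */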

static u64 update_load(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	u64 now_idle, now, active_time, delta_idle, delta_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - icpu->time_in_idle);
	delta_time = (now - icpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;

	icpu->time_in_idle = now_idle;
	icpu->time_in_idle_timestamp = now;

	return now;
}
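
/*
 * Comment added for readability: cputime_speedadj accumulates
 * (active time x current frequency), so dividing it by the wall-clock
 * delta_time in eval_target_freq() yields a frequency-weighted measure of
 * how busy the CPU was, normalized against the speeds it actually ran at.
 */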

/* Re-evaluate load to see if a frequency change is required or not */
static void eval_target_freq(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	u64 cputime_speedadj, now, max_fvtime;
	unsigned int new_freq, loadadjfreq, index, delta_time;
	unsigned long flags;
	int cpu_load;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&icpu->load_lock, flags);
	now = update_load(icpu, smp_processor_id());
	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
	cputime_speedadj = icpu->cputime_speedadj;
	spin_unlock_irqrestore(&icpu->load_lock, flags);

	if (!delta_time)
		return;

	spin_lock_irqsave(&icpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / policy->cur;
	tunables->boosted = tunables->boost ||
			    now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(icpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(icpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

#ifdef CONFIG_ARCH_ROCKCHIP
	if (now < tunables->touchboostpulse_endtime &&
	    new_freq < tunables->touchboost_freq) {
		new_freq = tunables->touchboost_freq;
	}
#endif
	if (policy->cur >= tunables->hispeed_freq &&
	    new_freq > policy->cur &&
	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
		trace_cpufreq_interactive_notyet(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	icpu->loc_hispeed_val_time = now;

	index = cpufreq_frequency_table_target(policy, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
			goto exit;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		icpu->floor_freq = new_freq;
		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
			icpu->loc_floor_val_time = now;
	}

	if (icpu->target_freq == new_freq &&
	    icpu->target_freq <= policy->cur) {
		trace_cpufreq_interactive_already(cpu, cpu_load,
			icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
					 policy->cur, new_freq);

	icpu->target_freq = new_freq;
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	wake_up_process(speedchange_task);
	return;

exit:
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
}

static void cpufreq_interactive_update(struct interactive_cpu *icpu)
{
	eval_target_freq(icpu);
	slack_timer_resched(icpu, smp_processor_id(), true);
}

static void cpufreq_interactive_idle_end(void)
{
	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
						smp_processor_id());
	unsigned long sampling_rate;

	if (!down_read_trylock(&icpu->enable_sem))
		return;

	if (icpu->ipolicy) {
		/*
		 * We haven't sampled load for more than sampling_rate time, do
		 * it right now.
		 */
		if (time_after_eq(jiffies, icpu->next_sample_jiffies)) {
			sampling_rate = icpu->ipolicy->tunables->sampling_rate;
			icpu->last_sample_time = local_clock();
			icpu->next_sample_jiffies = usecs_to_jiffies(sampling_rate) + jiffies;
			cpufreq_interactive_update(icpu);
		}
	}

	up_read(&icpu->enable_sem);
}

static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
						unsigned int *pmax_freq,
						u64 *phvt, u64 *pfvt)
{
	struct interactive_cpu *icpu;
	u64 hvt = ~0ULL, fvt = 0;
	unsigned int max_freq = 0, i;

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);

		fvt = max(fvt, icpu->loc_floor_val_time);
		if (icpu->target_freq > max_freq) {
			max_freq = icpu->target_freq;
			hvt = icpu->loc_hispeed_val_time;
		} else if (icpu->target_freq == max_freq) {
			hvt = min(hvt, icpu->loc_hispeed_val_time);
		}
	}

	*pmax_freq = max_freq;
	*phvt = hvt;
	*pfvt = fvt;
}
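
/*
 * Comment added for readability: all CPUs in a policy share one clock, so
 * the policy is driven to the maximum of the per-CPU target frequencies.
 * hvt/fvt carry the validation times that go with that choice: the minimum
 * hispeed time among the CPUs tied at max_freq, and the newest floor time
 * across the whole policy.
 */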

static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
					   struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	u64 hvt, fvt;
	unsigned int max_freq;
	int i;

	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);
		icpu->pol_floor_val_time = fvt;
	}

	if (max_freq != policy->cur) {
		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);
			icpu->pol_hispeed_val_time = hvt;
		}
	}

	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;

again:
	set_current_state(TASK_INTERRUPTIBLE);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	if (cpumask_empty(&speedchange_cpumask)) {
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
		schedule();

		if (kthread_should_stop())
			return 0;

		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	}

	set_current_state(TASK_RUNNING);
	tmp_mask = speedchange_cpumask;
	cpumask_clear(&speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
		struct cpufreq_policy *policy;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;

		down_write(&policy->rwsem);

		if (likely(down_read_trylock(&icpu->enable_sem))) {
			if (likely(icpu->ipolicy))
				cpufreq_interactive_adjust_cpu(cpu, policy);
			up_read(&icpu->enable_sem);
		}

		up_write(&policy->rwsem);
		cpufreq_cpu_put(policy);
	}

	goto again;
}
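
/*
 * Comment added for readability: the classic kthread wait pattern above
 * marks the task TASK_INTERRUPTIBLE *before* testing speedchange_cpumask
 * under the lock, so a CPU that sets its bit and calls wake_up_process()
 * after the unlock cannot be missed; the wakeup simply turns the upcoming
 * schedule() into a no-op.
 */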

static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
{
	struct interactive_policy *ipolicy;
	struct cpufreq_policy *policy;
	struct interactive_cpu *icpu;
	unsigned long flags[2];
	bool wakeup = false;
	int i;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_ipolicy(ipolicy) {
		policy = ipolicy->policy;

		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);

			if (!down_read_trylock(&icpu->enable_sem))
				continue;

			if (!icpu->ipolicy) {
				up_read(&icpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
			if (icpu->target_freq < tunables->hispeed_freq) {
				icpu->target_freq = tunables->hispeed_freq;
				cpumask_set_cpu(i, &speedchange_cpumask);
				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
				wakeup = true;
			}
			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);

			up_read(&icpu->enable_sem);
		}
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (wakeup)
		wake_up_process(speedchange_task);
}
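
/*
 * Comment added for readability: a boost immediately raises every CPU
 * governed by these tunables to hispeed_freq and kicks speedchange_task.
 * How long the boost holds is decided elsewhere: eval_target_freq() keeps
 * tunables->boosted true until boostpulse_endtime passes, or indefinitely
 * while tunables->boost is set.
 */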

static int cpufreq_interactive_notifier(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = freq->policy;
	struct interactive_cpu *icpu;
	unsigned long flags;
	int cpu;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		if (!down_read_trylock(&icpu->enable_sem))
			continue;

		if (!icpu->ipolicy) {
			up_read(&icpu->enable_sem);
			continue;
		}

		spin_lock_irqsave(&icpu->load_lock, flags);
		update_load(icpu, cpu);
		spin_unlock_irqrestore(&icpu->load_lock, flags);

		up_read(&icpu->enable_sem);
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp = buf;
	int ntokens = 1, i = 0;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
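
/*
 * Example with an illustrative input (comment added for readability):
 * buf = "85 1000000:90" tokenizes to {85, 1000000, 90}, i.e. three tokens.
 * The odd-count check above rejects malformed strings that end on a
 * frequency boundary with no value after it, since valid input is always
 * value [freq:value]...
 */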
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) /* Interactive governor sysfs interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	return container_of(attr_set, struct interactive_tunables, attr_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
#define show_one(file_name, type)					\
static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
{									\
	struct interactive_tunables *tunables = to_tunables(attr_set);	\
	return sprintf(buf, type "\n", tunables->file_name);		\
}

static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return ret;
}

static ssize_t store_target_loads(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_target_loads;
	unsigned long flags;
	int ntokens;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_ERR(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return count;
}
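
/*
 * Stores swap the freshly parsed array in under the corresponding spinlock
 * and free the previous one; the static default tables are never kfree()d.
 */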

static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
					char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;
	int ntokens;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_ERR(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return count;
}

static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->hispeed_freq = val;

	return count;
}

static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->go_hispeed_load = val;

	return count;
}

static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->min_sample_time = val;

	return count;
}

static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);

	return sprintf(buf, "%lu\n", tunables->sampling_rate);
}

static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val, val_round;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->sampling_rate = val_round;

	return count;
}
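
/*
 * Example: with HZ=100 one jiffy is 10000 us, so writing 15000 is rounded
 * up to 20000 (usecs_to_jiffies() rounds up to whole jiffies).
 */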

static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
				 size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack = val;
	update_slack_delay(tunables);

	return count;
}

static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
			   size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost = val;

	if (tunables->boost) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
					tunables->boostpulse_duration;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);

	return count;
}

static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration = val;

	return count;
}

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->io_is_busy = val;

	return count;
}

show_one(hispeed_freq, "%u");
show_one(go_hispeed_load, "%lu");
show_one(min_sample_time, "%lu");
show_one(timer_slack, "%lu");
show_one(boost, "%u");
show_one(boostpulse_duration, "%u");
show_one(io_is_busy, "%u");

gov_attr_rw(target_loads);
gov_attr_rw(above_hispeed_delay);
gov_attr_rw(hispeed_freq);
gov_attr_rw(go_hispeed_load);
gov_attr_rw(min_sample_time);
gov_attr_rw(timer_rate);
gov_attr_rw(timer_slack);
gov_attr_rw(boost);
gov_attr_wo(boostpulse);
gov_attr_rw(boostpulse_duration);
gov_attr_rw(io_is_busy);

static struct attribute *interactive_attributes[] = {
	&target_loads.attr,
	&above_hispeed_delay.attr,
	&hispeed_freq.attr,
	&go_hispeed_load.attr,
	&min_sample_time.attr,
	&timer_rate.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy.attr,
	NULL
};
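
/*
 * With per-policy governors these attributes appear in an "interactive"
 * subdirectory of the policy's cpufreq directory (see the
 * kobject_init_and_add() call in cpufreq_interactive_init() below).
 */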

static struct kobj_type interactive_tunables_ktype = {
	.default_attrs = interactive_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

/* Interactive Governor callbacks */
struct interactive_governor {
	struct cpufreq_governor gov;
	unsigned int usage_count;
};

static struct interactive_governor interactive_gov;

#define CPU_FREQ_GOV_INTERACTIVE	(&interactive_gov.gov)

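/*
 * irq-work handler: runs in hard-IRQ context on icpu->cpu and performs the
 * frequency re-evaluation deferred from update_util_handler() below.
 */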
static void irq_work(struct irq_work *irq_work)
{
	struct interactive_cpu *icpu = container_of(irq_work, struct
						    interactive_cpu, irq_work);

	cpufreq_interactive_update(icpu);
	icpu->work_in_progress = false;
}

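/*
 * Scheduler hook, invoked on every utilization update of a covered CPU.
 * It rate-limits sampling to tunables->sampling_rate and defers the real
 * work to the irq-work above, which is queued on the CPU being sampled.
 */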
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	struct interactive_cpu *icpu = container_of(data,
					struct interactive_cpu, update_util);
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;
	u64 delta_ns;

	/*
	 * The irq-work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (icpu->work_in_progress)
		return;

	delta_ns = time - icpu->last_sample_time;
	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
		return;

	icpu->last_sample_time = time;
	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
				    jiffies;

	icpu->work_in_progress = true;
	irq_work_queue_on(&icpu->irq_work, icpu->cpu);
}

static void gov_set_update_util(struct interactive_policy *ipolicy)
{
	struct cpufreq_policy *policy = ipolicy->policy;
	struct interactive_cpu *icpu;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->last_sample_time = 0;
		icpu->next_sample_jiffies = 0;
		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
					     update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}

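/*
 * Tear down a CPU's deferred work.  Callers remove the update_util hook
 * first (see gov_clear_update_util() above), so no new irq-work can be
 * queued while we wait for the pending one and the slack timer to finish.
 */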
static void icpu_cancel_work(struct interactive_cpu *icpu)
{
	irq_work_sync(&icpu->irq_work);
	icpu->work_in_progress = false;
	del_timer_sync(&icpu->slack_timer);
}

static struct interactive_policy *
interactive_policy_alloc(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;

	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
	if (!ipolicy)
		return NULL;

	ipolicy->policy = policy;

	return ipolicy;
}

static void interactive_policy_free(struct interactive_policy *ipolicy)
{
	kfree(ipolicy);
}

static struct interactive_tunables *
interactive_tunables_alloc(struct interactive_policy *ipolicy)
{
	struct interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return NULL;

	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
	if (!have_governor_per_policy())
		global_tunables = tunables;

	ipolicy->tunables = tunables;

	return tunables;
}

static void interactive_tunables_free(struct interactive_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

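/*
 * Rockchip touch boost: on touchscreen/key/mouse input, raise every online
 * CPU's floor and target frequency to touchboost_freq for
 * touchboostpulse_duration_val microseconds, so interactive workloads ramp
 * up before the load statistics catch up.
 */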
#ifdef CONFIG_ARCH_ROCKCHIP
static void cpufreq_interactive_input_event(struct input_handle *handle,
					    unsigned int type,
					    unsigned int code,
					    int value)
{
	u64 now, endtime;
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct interactive_cpu *pcpu;
	struct interactive_tunables *tunables;

	if (type != EV_ABS && type != EV_KEY && type != EV_REL)
		return;

	trace_cpufreq_interactive_boost("touch");
	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	now = ktime_to_us(ktime_get());
	for_each_online_cpu(i) {
		pcpu = &per_cpu(interactive_cpu, i);
		if (!down_read_trylock(&pcpu->enable_sem))
			continue;

		if (!pcpu->ipolicy) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		tunables = pcpu->ipolicy->tunables;
		if (!tunables) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		endtime = now + tunables->touchboostpulse_duration_val;
		if (endtime < (tunables->touchboostpulse_endtime +
			       10 * USEC_PER_MSEC)) {
			up_read(&pcpu->enable_sem);
			continue;
		}
		tunables->touchboostpulse_endtime = endtime;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->touchboost_freq) {
			pcpu->target_freq = tunables->touchboost_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->loc_hispeed_val_time =
					ktime_to_us(ktime_get());
			anyboost = 1;
		}

		pcpu->floor_freq = tunables->touchboost_freq;
		pcpu->loc_floor_val_time = ktime_to_us(ktime_get());

		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);

		up_read(&pcpu->enable_sem);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
					     struct input_dev *dev,
					     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{/* A mouse-like device, at least one button, two relative axes */
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_KEYBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
		.relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) },
	},
	{/* A separate scrollwheel */
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
				INPUT_DEVICE_ID_MATCH_RELBIT,
		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
		.relbit = { BIT_MASK(REL_WHEEL) },
	},
	{ },
};

static struct input_handler cpufreq_interactive_input_handler = {
	.event		= cpufreq_interactive_input_event,
	.connect	= cpufreq_interactive_input_connect,
	.disconnect	= cpufreq_interactive_input_disconnect,
	.name		= "cpufreq_interactive",
	.id_table	= cpufreq_interactive_ids,
};

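/*
 * Seed Rockchip-specific defaults and restore any tunables saved by
 * cpufreq_interactive_exit(), so values survive a governor stop/start
 * cycle (backup_tunables[0] is for policy0, [1] for the other policy).
 */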
static void rockchip_cpufreq_policy_init(struct interactive_policy *ipolicy)
{
	struct interactive_tunables *tunables = ipolicy->tunables;
	struct gov_attr_set attr_set;
	int index;

	tunables->min_sample_time = 40 * USEC_PER_MSEC;
	tunables->boostpulse_duration = 40 * USEC_PER_MSEC;
	if (ipolicy->policy->cpu == 0) {
		tunables->hispeed_freq = 1008000;
		tunables->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
		tunables->touchboost_freq = 1200000;
	} else {
		tunables->hispeed_freq = 816000;
	}

	index = (ipolicy->policy->cpu == 0) ? 0 : 1;
	if (!backup_tunables[index].sampling_rate) {
		backup_tunables[index] = *tunables;
	} else {
		attr_set = tunables->attr_set;
		*tunables = backup_tunables[index];
		tunables->attr_set = attr_set;
	}
}
#endif

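/*
 * Governor init: allocate the per-policy state, then either attach to the
 * already existing global tunables (a single shared set when the driver
 * does not use per-policy governors) or allocate and initialize a fresh
 * set and register the one-time notifiers.
 */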
int cpufreq_interactive_init(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;
	struct interactive_tunables *tunables;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	ipolicy = interactive_policy_alloc(policy);
	if (!ipolicy)
		return -ENOMEM;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_int_policy;
		}

		policy->governor_data = ipolicy;
		ipolicy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set,
				 &ipolicy->tunables_hook);
		goto out;
	}

	tunables = interactive_tunables_alloc(ipolicy);
	if (!tunables) {
		ret = -ENOMEM;
		goto free_int_policy;
	}

	tunables->hispeed_freq = policy->max;
	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
	tunables->timer_slack = DEFAULT_TIMER_SLACK;
	update_slack_delay(tunables);

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	policy->governor_data = ipolicy;

#ifdef CONFIG_ARCH_ROCKCHIP
	rockchip_cpufreq_policy_init(ipolicy);
#endif
	ret = kobject_init_and_add(&tunables->attr_set.kobj,
				   &interactive_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   interactive_gov.gov.name);
	if (ret)
		goto fail;

	/* One-time initialization for the governor */
	if (!interactive_gov.usage_count++) {
		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(&cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
#ifdef CONFIG_ARCH_ROCKCHIP
		ret = input_register_handler(&cpufreq_interactive_input_handler);
		if (ret)
			pr_warn("failed to register input handler (%d)\n", ret);
#endif
	}

 out:
	mutex_unlock(&global_tunables_lock);
	return 0;

 fail:
	policy->governor_data = NULL;
	interactive_tunables_free(tunables);

 free_int_policy:
	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
	pr_err("governor initialization failed (%d)\n", ret);

	return ret;
}

void cpufreq_interactive_exit(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_tunables *tunables = ipolicy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	/* Last policy using the governor? */
	if (!--interactive_gov.usage_count) {
		cpufreq_unregister_notifier(&cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
#ifdef CONFIG_ARCH_ROCKCHIP
		input_unregister_handler(&cpufreq_interactive_input_handler);
#endif
	}

	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
	policy->governor_data = NULL;
	if (!count) {
#ifdef CONFIG_ARCH_ROCKCHIP
		if (policy->cpu == 0)
			backup_tunables[0] = *tunables;
		else
			backup_tunables[1] = *tunables;
#endif
		interactive_tunables_free(tunables);
	}

	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
}

int cpufreq_interactive_start(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->target_freq = policy->cur;
		icpu->floor_freq = icpu->target_freq;
		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;
		icpu->cpu = cpu;

		down_write(&icpu->enable_sem);
		icpu->ipolicy = ipolicy;
		slack_timer_resched(icpu, cpu, false);
		up_write(&icpu->enable_sem);
	}

	gov_set_update_util(ipolicy);
	return 0;
}

void cpufreq_interactive_stop(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	gov_clear_update_util(ipolicy->policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		down_write(&icpu->enable_sem);
		icpu_cancel_work(icpu);
		icpu->ipolicy = NULL;
		up_write(&icpu->enable_sem);
	}
}

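/*
 * limits() callback: after the cpufreq core applies the new constraints,
 * clamp each CPU's cached target_freq into [policy->min, policy->max].
 */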
void cpufreq_interactive_limits(struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	unsigned int cpu;
	unsigned long flags;

	cpufreq_policy_apply_limits(policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		spin_lock_irqsave(&icpu->target_freq_lock, flags);

		if (policy->max < icpu->target_freq)
			icpu->target_freq = policy->max;
		else if (policy->min > icpu->target_freq)
			icpu->target_freq = policy->min;

		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
	}
}

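/*
 * The open-coded min/max test above is behaviourally identical to the
 * kernel's clamp() helper; an equivalent one-liner, shown for illustration
 * only:
 */
#if 0
		icpu->target_freq = clamp(icpu->target_freq,
					  policy->min, policy->max);
#endif
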
static struct interactive_governor interactive_gov = {
	.gov = {
		.name			= "interactive",
		.owner			= THIS_MODULE,
		.init			= cpufreq_interactive_init,
		.exit			= cpufreq_interactive_exit,
		.start			= cpufreq_interactive_start,
		.stop			= cpufreq_interactive_stop,
		.limits			= cpufreq_interactive_limits,
	}
};

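/*
 * The cpufreq core drives these callbacks in a fixed order: ->init when the
 * governor is first attached to a policy, ->start to begin sampling, ->limits
 * whenever policy->min/max change, and ->stop/->exit on the way down.
 */
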
static void cpufreq_interactive_nop_timer(struct timer_list *t)
{
	/*
	 * The slack timer exists only to wake the CPU from idle, so that the
	 * governor can lower its frequency if it is not already at the
	 * minimum.
	 *
	 * This matters on platforms where a CPU left at a high frequency
	 * draws more power even while idle.
	 */
}

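/*
 * For reference, slack_timer_resched() (defined earlier in this file) is
 * assumed to arm this timer only while the CPU sits above policy->min,
 * roughly as below. The timer_slack_delay_jiffies tunable name is an
 * assumption based on the upstream interactive governor.
 */
#if 0
	if (icpu->target_freq > icpu->ipolicy->policy->min)
		mod_timer(&icpu->slack_timer,
			  jiffies + tunables->timer_slack_delay_jiffies);
#endif
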
static int __init cpufreq_interactive_gov_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		icpu = &per_cpu(interactive_cpu, cpu);

		init_irq_work(&icpu->irq_work, irq_work);
		spin_lock_init(&icpu->load_lock);
		spin_lock_init(&icpu->target_freq_lock);
		init_rwsem(&icpu->enable_sem);

		/* Initialize per-cpu slack-timer */
		timer_setup(&icpu->slack_timer, cpufreq_interactive_nop_timer,
			    TIMER_PINNED);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
					  NULL, "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}

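/*
 * Ordering note: kthread_create() (rather than kthread_run()) leaves the
 * thread stopped, so its scheduling policy can be raised to SCHED_FIFO before
 * it first runs; only then is it woken. Kernels that provide the
 * sched_set_fifo() helper could use it instead, at the cost of a fixed
 * mid-range FIFO priority rather than MAX_RT_PRIO - 1:
 */
#if 0
	sched_set_fifo(speedchange_task);	/* replaces the sched_param setup */
#endif
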
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_INTERACTIVE;
}

fs_initcall(cpufreq_interactive_gov_init);
#else
module_init(cpufreq_interactive_gov_init);
#endif

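/*
 * When this governor is the system default it must already be registered by
 * the time a cpufreq driver probes and tries to start it, hence the early
 * fs_initcall() above; built as a plain module, a regular module_init() is
 * sufficient.
 */
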
static void __exit cpufreq_interactive_gov_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}
module_exit(cpufreq_interactive_gov_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for latency-sensitive workloads");
MODULE_LICENSE("GPL");
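
/*
 * Usage note (illustrative): once registered, the governor is selected per
 * policy through the standard cpufreq sysfs interface, e.g.
 *
 *	echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * after which its tunables (hispeed_freq, target_loads, ...) appear in the
 * policy's cpufreq directory.
 */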