Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * intel_pstate.c: Native P state management for Intel processors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * (C) Copyright 2012 Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/hrtimer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/tick.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/sched/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/sysfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/pm_qos.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <trace/events/power.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/div64.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/msr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/intel-family.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define INTEL_CPUFREQ_TRANSITION_DELAY		500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <acpi/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <acpi/cppc_acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #define FRAC_BITS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define fp_toint(X) ((X) >> FRAC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define EXT_BITS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) static inline int32_t mul_fp(int32_t x, int32_t y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) static inline int32_t div_fp(s64 x, s64 y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	return div64_s64((int64_t)x << FRAC_BITS, y);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) static inline int ceiling_fp(int32_t x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	int mask, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	ret = fp_toint(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	mask = (1 << FRAC_BITS) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	if (x & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		ret += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) static inline int32_t percent_fp(int percent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	return div_fp(percent, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) static inline u64 mul_ext_fp(u64 x, u64 y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	return (x * y) >> EXT_FRAC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) static inline u64 div_ext_fp(u64 x, u64 y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	return div64_u64(x << EXT_FRAC_BITS, y);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) static inline int32_t percent_ext_fp(int percent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	return div_ext_fp(percent, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * struct sample -	Store performance sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  *			performance during last sample period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * @busy_scaled:	Scaled busy value which is used to calculate next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104)  *			P state. This can be different than core_avg_perf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105)  *			to account for cpu idle period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106)  * @aperf:		Difference of actual performance frequency clock count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107)  *			read from APERF MSR between last and current sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108)  * @mperf:		Difference of maximum performance frequency clock count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109)  *			read from MPERF MSR between last and current sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110)  * @tsc:		Difference of time stamp counter between last and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111)  *			current sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112)  * @time:		Current time from scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114)  * This structure is used in the cpudata structure to store performance sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115)  * data for choosing next P State.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) struct sample {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	int32_t core_avg_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	int32_t busy_scaled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	u64 aperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	u64 mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	u64 tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	u64 time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  * struct pstate_data - Store P state data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  * @current_pstate:	Current requested P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  * @min_pstate:		Min P state possible for this platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  * @max_pstate:		Max P state possible for this platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131)  * @max_pstate_physical:This is physical Max P state for a processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132)  *			This can be higher than the max_pstate which can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133)  *			be limited by platform thermal design power limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134)  * @scaling:		Scaling factor to  convert frequency to cpufreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135)  *			frequency units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136)  * @turbo_pstate:	Max Turbo P state possible for this platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137)  * @max_freq:		@max_pstate frequency in cpufreq units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138)  * @turbo_freq:		@turbo_pstate frequency in cpufreq units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140)  * Stores the per cpu model P state limits and current P state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) struct pstate_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	int	current_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	int	min_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	int	max_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	int	max_pstate_physical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	int	scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	int	turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	unsigned int max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	unsigned int turbo_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  * struct vid_data -	Stores voltage information data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * @min:		VID data for this platform corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  *			the lowest P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  * @max:		VID data corresponding to the highest P State.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  * @turbo:		VID data for turbo P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159)  * @ratio:		Ratio of (vid max - vid min) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160)  *			(max P state - Min P State)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162)  * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163)  * This data is used in Atom platforms, where in addition to target P state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * the voltage data needs to be specified to select next P State.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) struct vid_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	int min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	int max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	int turbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	int32_t ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * struct global_params - Global parameters, mostly tunable via sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * @no_turbo:		Whether or not to use turbo P-states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  * @turbo_disabled:	Whether or not turbo P-states are available at all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  *			based on the MSR_IA32_MISC_ENABLE value and whether or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  *			not the maximum reported turbo P-state is different from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  *			the maximum reported non-turbo one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  *			P-state capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  *			P-state capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) struct global_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	bool no_turbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	bool turbo_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	bool turbo_disabled_mf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	int max_perf_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	int min_perf_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  * struct cpudata -	Per CPU instance data storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  * @cpu:		CPU number for this instance data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  * @policy:		CPUFreq policy value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198)  * @update_util:	CPUFreq utility callback information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199)  * @update_util_set:	CPUFreq utility callback is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200)  * @iowait_boost:	iowait-related boost fraction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201)  * @last_update:	Time of the last update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202)  * @pstate:		Stores P state limits for this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203)  * @vid:		Stores VID limits for this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204)  * @last_sample_time:	Last Sample time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205)  * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206)  * @prev_aperf:		Last APERF value read from APERF MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207)  * @prev_mperf:		Last MPERF value read from MPERF MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208)  * @prev_tsc:		Last timestamp counter (TSC) value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209)  * @prev_cummulative_iowait: IO Wait time difference from last and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210)  *			current sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211)  * @sample:		Storage for storing last Sample data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212)  * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213)  * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  * @epp_powersave:	Last saved HWP energy performance preference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  *			(EPP) or energy performance bias (EPB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  *			when policy switched to performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  * @epp_policy:		Last saved policy used to set EPP/EPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * @epp_default:	Power on default HWP energy performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  *			preference/bias
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * @epp_cached		Cached HWP energy-performance preference value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * @hwp_req_cached:	Cached value of the last HWP Request MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  * @last_io_update:	Last time when IO wake flag was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * @sched_flags:	Store scheduler flags for possible cross CPU update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * @hwp_boost_min:	Last HWP boosted min performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  * @suspended:		Whether or not the driver has been suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  * This structure stores per CPU instance data for all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) struct cpudata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	unsigned int policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	struct update_util_data update_util;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	bool   update_util_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	struct pstate_data pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	struct vid_data vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	u64	last_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	u64	last_sample_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	u64	aperf_mperf_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	u64	prev_aperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	u64	prev_mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	u64	prev_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	u64	prev_cummulative_iowait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	struct sample sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	int32_t	min_perf_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	int32_t	max_perf_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	struct acpi_processor_performance acpi_perf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	bool valid_pss_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	unsigned int iowait_boost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	s16 epp_powersave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	s16 epp_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	s16 epp_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	s16 epp_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	u64 hwp_req_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	u64 hwp_cap_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	u64 last_io_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	unsigned int sched_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	u32 hwp_boost_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	bool suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) static struct cpudata **all_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272)  * struct pstate_funcs - Per CPU model specific callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273)  * @get_max:		Callback to get maximum non turbo effective P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274)  * @get_max_physical:	Callback to get maximum non turbo physical P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275)  * @get_min:		Callback to get minimum P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276)  * @get_turbo:		Callback to get turbo P state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277)  * @get_scaling:	Callback to get frequency scaling factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278)  * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279)  * @get_val:		Callback to convert P state to actual MSR write value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280)  * @get_vid:		Callback to get VID data for Atom platforms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282)  * Core and Atom CPU models have different way to get P State limits. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  * structure is used to store those callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) struct pstate_funcs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	int (*get_max)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	int (*get_max_physical)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	int (*get_min)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	int (*get_turbo)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	int (*get_scaling)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	int (*get_aperf_mperf_shift)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	u64 (*get_val)(struct cpudata*, int pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	void (*get_vid)(struct cpudata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) static struct pstate_funcs pstate_funcs __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) static int hwp_active __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) static int hwp_mode_bdw __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) static bool per_cpu_limits __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) static bool hwp_boost __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) static struct cpufreq_driver *intel_pstate_driver __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) static bool acpi_ppc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) static struct global_params global;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) static DEFINE_MUTEX(intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) static DEFINE_MUTEX(intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) static bool intel_pstate_acpi_pm_profile_server(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) static bool intel_pstate_get_ppc_enable_status(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	if (intel_pstate_acpi_pm_profile_server())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	return acpi_ppc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) #ifdef CONFIG_ACPI_CPPC_LIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) /* The work item is needed to avoid CPU hotplug locking issues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	sched_set_itmt_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) static void intel_pstate_set_itmt_prio(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	struct cppc_perf_caps cppc_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	 * The priorities can be set regardless of whether or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	 * sched_set_itmt_support(true) has been called and it is valid to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	 * update them at any time after it has been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	if (max_highest_perf <= min_highest_perf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		if (cppc_perf.highest_perf > max_highest_perf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 			max_highest_perf = cppc_perf.highest_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		if (cppc_perf.highest_perf < min_highest_perf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 			min_highest_perf = cppc_perf.highest_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 		if (max_highest_perf > min_highest_perf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 			 * This code can be run during CPU online under the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 			 * CPU hotplug locks, so sched_set_itmt_support()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 			 * cannot be called from here.  Queue up a work item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 			 * to invoke it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 			schedule_work(&sched_itmt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) static int intel_pstate_get_cppc_guranteed(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	struct cppc_perf_caps cppc_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	if (cppc_perf.guaranteed_perf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		return cppc_perf.guaranteed_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	return cppc_perf.nominal_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) #else /* CONFIG_ACPI_CPPC_LIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) static void intel_pstate_set_itmt_prio(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) #endif /* CONFIG_ACPI_CPPC_LIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		intel_pstate_set_itmt_prio(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (!intel_pstate_get_ppc_enable_status())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 						  policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	 * guarantee that the states returned by it map to the states in our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	 * list directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	if (cpu->acpi_perf_data.control_register.space_id !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 						ACPI_ADR_SPACE_FIXED_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	 * If there is only one entry _PSS, simply ignore _PSS and continue as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	 * usual without taking _PSS into account
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	if (cpu->acpi_perf_data.state_count < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			 (u32) cpu->acpi_perf_data.states[i].power,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			 (u32) cpu->acpi_perf_data.states[i].control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	 * The _PSS table doesn't contain whole turbo frequency range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	 * This just contains +1 MHZ above the max non turbo frequency,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	 * with control value corresponding to max turbo ratio. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	 * when cpufreq set policy is called, it will call with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	 * max frequency, which will cause a reduced performance as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	 * this driver uses real max turbo frequency as the max
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	 * frequency. So correct this frequency in _PSS table to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	 * correct max turbo frequency based on the turbo state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	 * Also need to convert to MHz as _PSS freq is in MHz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (!global.turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		cpu->acpi_perf_data.states[0].core_frequency =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 					policy->cpuinfo.max_freq / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	cpu->valid_pss_table = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	pr_debug("_PPC limits will be enforced\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465)  err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	cpu->valid_pss_table = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	acpi_processor_unregister_performance(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (!cpu->valid_pss_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	acpi_processor_unregister_performance(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) #else /* CONFIG_ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) static inline bool intel_pstate_acpi_pm_profile_server(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) #endif /* CONFIG_ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) #ifndef CONFIG_ACPI_CPPC_LIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) static int intel_pstate_get_cppc_guranteed(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) #endif /* CONFIG_ACPI_CPPC_LIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) static inline void update_turbo_state(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	u64 misc_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	cpu = all_cpu_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	global.turbo_disabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) static int min_perf_pct_min(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	struct cpudata *cpu = all_cpu_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	int turbo_pstate = cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	return turbo_pstate ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	u64 epb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	if (!boot_cpu_has(X86_FEATURE_EPB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		return (s16)ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	return (s16)(epb & 0x0f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	s16 epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		 * When hwp_req_data is 0, means that caller didn't read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		 * MSR_HWP_REQUEST, so need to read and get EPP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		if (!hwp_req_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 					    &hwp_req_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			if (epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 				return epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		epp = (hwp_req_data >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		/* When there is no EPP present, HWP uses EPB settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		epp = intel_pstate_get_epb(cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	return epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) static int intel_pstate_set_epb(int cpu, s16 pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	u64 epb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	if (!boot_cpu_has(X86_FEATURE_EPB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	epb = (epb & ~0x0f) | pref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581)  * EPP/EPB display strings corresponding to EPP index in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582)  * energy_perf_strings[]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583)  *	index		String
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584)  *-------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585)  *	0		default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586)  *	1		performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  *	2		balance_performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  *	3		balance_power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  *	4		power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) static const char * const energy_perf_strings[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	"default",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	"performance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	"balance_performance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	"balance_power",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	"power",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) static const unsigned int epp_values[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	HWP_EPP_PERFORMANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	HWP_EPP_BALANCE_PERFORMANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	HWP_EPP_BALANCE_POWERSAVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	HWP_EPP_POWERSAVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	s16 epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	int index = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	*raw_epp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	epp = intel_pstate_get_epp(cpu_data, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	if (epp < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		return epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		if (epp == HWP_EPP_PERFORMANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		if (epp == HWP_EPP_BALANCE_PERFORMANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		if (epp == HWP_EPP_BALANCE_POWERSAVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		if (epp == HWP_EPP_POWERSAVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		*raw_epp = epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		 * Range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		 *	0x00-0x03	:	Performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		 *	0x04-0x07	:	Balance performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		 *	0x08-0x0B	:	Balance power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		 *	0x0C-0x0F	:	Power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		 * The EPB is a 4 bit value, but our ranges restrict the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		 * value which can be set. Here only using top two bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		 * effectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		index = (epp >> 2) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	 * Use the cached HWP Request MSR value, because in the active mode the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	 * register itself may be updated by intel_pstate_hwp_boost_up() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	 * intel_pstate_hwp_boost_down() at any time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	u64 value = READ_ONCE(cpu->hwp_req_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	value &= ~GENMASK_ULL(31, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	value |= (u64)epp << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	 * The only other updater of hwp_req_cached in the active mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	 * intel_pstate_hwp_set(), is called under the same lock as this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	 * function, so it cannot run in parallel with the update below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	WRITE_ONCE(cpu->hwp_req_cached, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		cpu->epp_cached = epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 					      int pref_index, bool use_raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 					      u32 raw_epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	int epp = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	if (!pref_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		epp = cpu_data->epp_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		if (use_raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			epp = raw_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		else if (epp == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			epp = epp_values[pref_index - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		 * To avoid confusion, refuse to set EPP to any values different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		 * from 0 (performance) if the current policy is "performance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		 * because those values would be overridden.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		ret = intel_pstate_set_epp(cpu_data, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		if (epp == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			epp = (pref_index - 1) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) static ssize_t show_energy_performance_available_preferences(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 				struct cpufreq_policy *policy, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	while (energy_perf_strings[i] != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	ret += sprintf(&buf[ret], "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) cpufreq_freq_attr_ro(energy_performance_available_preferences);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static struct cpufreq_driver intel_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static ssize_t store_energy_performance_preference(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		struct cpufreq_policy *policy, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	char str_preference[21];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	bool raw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	u32 epp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	ret = sscanf(buf, "%20s", str_preference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	ret = match_string(energy_perf_strings, -1, str_preference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		ret = kstrtouint(buf, 10, &epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		if (epp > 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		raw = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * This function runs with the policy R/W semaphore held, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * guarantees that the driver pointer will not change while it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 * running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	if (!intel_pstate_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	if (intel_pstate_driver == &intel_pstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		 * In the passive mode the governor needs to be stopped on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		 * target CPU before the EPP update and restarted after it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		 * which is super-heavy-weight, so make sure it is worth doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		 * upfront.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		if (!raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			epp = ret ? epp_values[ret - 1] : cpu->epp_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		if (cpu->epp_cached != epp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			cpufreq_stop_governor(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			ret = intel_pstate_set_epp(cpu, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			err = cpufreq_start_governor(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 				ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	return ret ?: count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) static ssize_t show_energy_performance_preference(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				struct cpufreq_policy *policy, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	int preference, raw_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (preference < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		return preference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (raw_epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		return  sprintf(buf, "%d\n", raw_epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) cpufreq_freq_attr_rw(energy_performance_preference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	u64 cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	int ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	if (ratio <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		ratio = HWP_GUARANTEED_PERF(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) cpufreq_freq_attr_ro(base_frequency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static struct freq_attr *hwp_cpufreq_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	&energy_performance_preference,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	&energy_performance_available_preferences,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	&base_frequency,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 				     int *current_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	u64 cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	WRITE_ONCE(cpu->hwp_cap_cached, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	if (global.no_turbo || global.turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		*current_max = HWP_GUARANTEED_PERF(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		*current_max = HWP_HIGHEST_PERF(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	*phy_max = HWP_HIGHEST_PERF(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static void intel_pstate_hwp_set(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct cpudata *cpu_data = all_cpu_data[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	int max, min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	s16 epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	max = cpu_data->max_perf_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	min = cpu_data->min_perf_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		min = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	value &= ~HWP_MIN_PERF(~0L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	value |= HWP_MIN_PERF(min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	value &= ~HWP_MAX_PERF(~0L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	value |= HWP_MAX_PERF(max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (cpu_data->epp_policy == cpu_data->policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		goto skip_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	cpu_data->epp_policy = cpu_data->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		epp = intel_pstate_get_epp(cpu_data, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		cpu_data->epp_powersave = epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		/* If EPP read was failed, then don't try to write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (epp < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			goto skip_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		epp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		/* skip setting EPP, when saved value is invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		if (cpu_data->epp_powersave < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			goto skip_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		 * No need to restore EPP when it is not zero. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 * means:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 *  - Policy is not changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 *  - user has manually changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 *  - Error reading EPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		epp = intel_pstate_get_epp(cpu_data, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (epp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			goto skip_epp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		epp = cpu_data->epp_powersave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		value &= ~GENMASK_ULL(31, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		value |= (u64)epp << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		intel_pstate_set_epb(cpu, epp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) skip_epp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	WRITE_ONCE(cpu_data->hwp_req_cached, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static void intel_pstate_hwp_offline(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	u64 value = READ_ONCE(cpu->hwp_req_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	int min_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		 * In case the EPP has been set to "performance" by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		 * active mode "performance" scaling algorithm, replace that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		 * temporary value with the cached EPP one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		value &= ~GENMASK_ULL(31, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		WRITE_ONCE(cpu->hwp_req_cached, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	value &= ~GENMASK_ULL(31, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* Set hwp_max = hwp_min */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	value |= HWP_MAX_PERF(min_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	value |= HWP_MIN_PERF(min_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/* Set EPP to min */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) #define POWER_CTL_EE_ENABLE	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #define POWER_CTL_EE_DISABLE	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) static int power_ctl_ee_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static void set_power_ctl_ee_state(bool input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	u64 power_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if (input) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void intel_pstate_hwp_enable(struct cpudata *cpudata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static void intel_pstate_hwp_reenable(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	intel_pstate_hwp_enable(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) static int intel_pstate_suspend(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	pr_debug("CPU %d suspending\n", cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	cpu->suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static int intel_pstate_resume(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	pr_debug("CPU %d resuming\n", cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	/* Only restore if the system default is changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		set_power_ctl_ee_state(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		set_power_ctl_ee_state(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (cpu->suspended && hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		/* Re-enable HWP, because "online" has not done that. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		intel_pstate_hwp_reenable(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	cpu->suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static void intel_pstate_update_policies(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		cpufreq_update_policy(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static void intel_pstate_update_max_freq(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct cpudata *cpudata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (!policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	cpudata = all_cpu_data[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	refresh_frequency_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	cpufreq_cpu_release(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static void intel_pstate_update_limits(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * If turbo has been turned on or off globally, policy limits for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * all CPUs need to be updated to reflect that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (global.turbo_disabled_mf != global.turbo_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		global.turbo_disabled_mf = global.turbo_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		arch_set_max_freq_ratio(global.turbo_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			intel_pstate_update_max_freq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		cpufreq_update_policy(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /************************** sysfs begin ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #define show_one(file_name, object)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	static ssize_t show_##file_name					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	{								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		return sprintf(buf, "%u\n", global.object);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static ssize_t intel_pstate_show_status(char *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static int intel_pstate_update_status(const char *buf, size_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static ssize_t show_status(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			   struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	ret = intel_pstate_show_status(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	char *p = memchr(buf, '\n', count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	return ret < 0 ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static ssize_t show_turbo_pct(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 				struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	int total, no_turbo, turbo_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	uint32_t turbo_fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	cpu = all_cpu_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	turbo_fp = div_fp(no_turbo, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	return sprintf(buf, "%u\n", turbo_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static ssize_t show_num_pstates(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 				struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	int total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	cpu = all_cpu_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	return sprintf(buf, "%u\n", total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static ssize_t show_no_turbo(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			     struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (global.turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		ret = sprintf(buf, "%u\n", global.turbo_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		ret = sprintf(buf, "%u\n", global.no_turbo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			      const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	unsigned int input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	ret = sscanf(buf, "%u", &input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (global.turbo_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	global.no_turbo = clamp_t(int, input, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (global.no_turbo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		struct cpudata *cpu = all_cpu_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		/* Squash the global minimum into the permitted range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		if (global.min_perf_pct > pct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			global.min_perf_pct = pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	intel_pstate_update_policies();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void update_qos_request(enum freq_qos_req_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	int max_state, turbo_max, freq, i, perf_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct freq_qos_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct cpufreq_policy *policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		struct cpudata *cpu = all_cpu_data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		policy = cpufreq_cpu_get(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		if (!policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		req = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		cpufreq_cpu_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		if (hwp_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			turbo_max = cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		if (type == FREQ_QOS_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			perf_pct = global.min_perf_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			req++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			perf_pct = global.max_perf_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		freq *= cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		if (freq_qos_update_request(req, freq) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 				  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	unsigned int input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	ret = sscanf(buf, "%u", &input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (intel_pstate_driver == &intel_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		intel_pstate_update_policies();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		update_qos_request(FREQ_QOS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	unsigned int input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	ret = sscanf(buf, "%u", &input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (!intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	global.min_perf_pct = clamp_t(int, input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				      min_perf_pct_min(), global.max_perf_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (intel_pstate_driver == &intel_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		intel_pstate_update_policies();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		update_qos_request(FREQ_QOS_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 				struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	return sprintf(buf, "%u\n", hwp_boost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static ssize_t store_hwp_dynamic_boost(struct kobject *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				       struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 				       const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	unsigned int input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	ret = kstrtouint(buf, 10, &input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	hwp_boost = !!input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	intel_pstate_update_policies();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 				      char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	u64 power_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	int enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	return sprintf(buf, "%d\n", !enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 				       const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	bool input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	ret = kstrtobool(buf, &input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	set_power_ctl_ee_state(input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) show_one(max_perf_pct, max_perf_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) show_one(min_perf_pct, min_perf_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) define_one_global_rw(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) define_one_global_rw(no_turbo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) define_one_global_rw(max_perf_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) define_one_global_rw(min_perf_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) define_one_global_ro(turbo_pct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) define_one_global_ro(num_pstates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) define_one_global_rw(hwp_dynamic_boost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) define_one_global_rw(energy_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) static struct attribute *intel_pstate_attributes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	&status.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	&no_turbo.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	&turbo_pct.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	&num_pstates.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static const struct attribute_group intel_pstate_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	.attrs = intel_pstate_attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static struct kobject *intel_pstate_kobject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static void __init intel_pstate_sysfs_expose_params(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 						&cpu_subsys.dev_root->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (WARN_ON(!intel_pstate_kobject))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	if (WARN_ON(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	 * If per cpu limits are enforced there are no global limits, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	 * return without creating max/min_perf_pct attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (per_cpu_limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static void __init intel_pstate_sysfs_remove(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (!intel_pstate_kobject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (!per_cpu_limits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	kobject_put(intel_pstate_kobject);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!hwp_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	WARN_ON_ONCE(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (!hwp_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /************************** sysfs end ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static void intel_pstate_hwp_enable(struct cpudata *cpudata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	/* First disable HWP notification interrupt as we don't process them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (cpudata->epp_default == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static int atom_get_min_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	return (value >> 8) & 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int atom_get_max_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	return (value >> 16) & 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static int atom_get_turbo_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	return value & 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static u64 atom_get_val(struct cpudata *cpudata, int pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	int32_t vid_fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	u32 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	val = (u64)pstate << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (global.no_turbo && !global.turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		val |= (u64)1 << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	vid_fp = cpudata->vid.min + mul_fp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		int_tofp(pstate - cpudata->pstate.min_pstate),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		cpudata->vid.ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	vid = ceiling_fp(vid_fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (pstate > cpudata->pstate.max_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		vid = cpudata->vid.turbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return val | vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static int silvermont_get_scaling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	/* Defined in Table 35-6 from SDM (Sept 2015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	static int silvermont_freq_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		83300, 100000, 133300, 116700, 80000};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	rdmsrl(MSR_FSB_FREQ, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	i = value & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	WARN_ON(i > 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	return silvermont_freq_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static int airmont_get_scaling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	/* Defined in Table 35-10 from SDM (Sept 2015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	static int airmont_freq_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		83300, 100000, 133300, 116700, 80000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		93300, 90000, 88900, 87500};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	rdmsrl(MSR_FSB_FREQ, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	i = value & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	WARN_ON(i > 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	return airmont_freq_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static void atom_get_vid(struct cpudata *cpudata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	rdmsrl(MSR_ATOM_CORE_VIDS, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	cpudata->vid.ratio = div_fp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		cpudata->vid.max - cpudata->vid.min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		int_tofp(cpudata->pstate.max_pstate -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			cpudata->pstate.min_pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	cpudata->vid.turbo = value & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static int core_get_min_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	rdmsrl(MSR_PLATFORM_INFO, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	return (value >> 40) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static int core_get_max_pstate_physical(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	rdmsrl(MSR_PLATFORM_INFO, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	return (value >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) static int core_get_tdp_ratio(u64 plat_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	/* Check how many TDP levels present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (plat_info & 0x600000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		u64 tdp_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		u64 tdp_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		int tdp_msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		/* Get the TDP level (0, 1, 2) to get ratios */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		/* TDP MSR are continuous starting at 0x648 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		/* For level 1 and 2, bits[23:16] contain the ratio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		if (tdp_ctrl & 0x03)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			tdp_ratio >>= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		return (int)tdp_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static int core_get_max_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	u64 tar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	u64 plat_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int max_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	int tdp_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	rdmsrl(MSR_PLATFORM_INFO, plat_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	max_pstate = (plat_info >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	tdp_ratio = core_get_tdp_ratio(plat_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (tdp_ratio <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		return max_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		/* Turbo activation ratio is not used on HWP platforms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		return tdp_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		int tar_levels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		/* Do some sanity checking for safety */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		tar_levels = tar & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		if (tdp_ratio - 1 == tar_levels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			max_pstate = tar_levels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			pr_debug("max_pstate=TAC %x\n", max_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return max_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static int core_get_turbo_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	int nont, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	nont = core_get_max_pstate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	ret = (value) & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	if (ret <= nont)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		ret = nont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static inline int core_get_scaling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	return 100000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static u64 core_get_val(struct cpudata *cpudata, int pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	val = (u64)pstate << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (global.no_turbo && !global.turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		val |= (u64)1 << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int knl_get_aperf_mperf_shift(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	return 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static int knl_get_turbo_pstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	int nont, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	nont = core_get_max_pstate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	ret = (((value) >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	if (ret <= nont)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		ret = nont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	cpu->pstate.current_pstate = pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 * Generally, there is no guarantee that this code will always run on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 * the CPU being updated, so force the register update to run on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	 * right CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		      pstate_funcs.get_val(cpu, pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static void intel_pstate_set_min_pstate(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static void intel_pstate_max_within_limits(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	intel_pstate_set_pstate(cpu, pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	cpu->pstate.min_pstate = pstate_funcs.get_min();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	cpu->pstate.scaling = pstate_funcs.get_scaling();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (hwp_active && !hwp_mode_bdw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		unsigned int phy_max, current_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		cpu->pstate.turbo_pstate = phy_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		cpu->pstate.max_pstate = pstate_funcs.get_max();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (pstate_funcs.get_aperf_mperf_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	if (pstate_funcs.get_vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		pstate_funcs.get_vid(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	intel_pstate_set_min_pstate(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * Long hold time will keep high perf limits for long time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * which negatively impacts perf/watt for some workloads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * like specpower. 3ms is based on experiements on some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * workoads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	u32 max_limit = (hwp_req & 0xff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	u32 min_limit = (hwp_req & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	u32 boost_level1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	 * Cases to consider (User changes via sysfs or boot time):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	 *	No boost, return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	 *     Should result in one level boost only for P0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	 *     Should result in two level boost:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	 *         (min + p1)/2 and P1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	 *     Should result in three level boost:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	 *        (min + p1)/2, P1 and P0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	/* If max and min are equal or already at max, nothing to boost */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (!cpu->hwp_boost_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		cpu->hwp_boost_min = min_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	/* level at half way mark between min and guranteed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	if (cpu->hwp_boost_min < boost_level1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		cpu->hwp_boost_min = boost_level1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		cpu->hwp_boost_min = max_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	wrmsrl(MSR_HWP_REQUEST, hwp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	cpu->last_update = cpu->sample.time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	if (cpu->hwp_boost_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		bool expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		/* Check if we are idle for hold time to boost down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		expired = time_after64(cpu->sample.time, cpu->last_update +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 				       hwp_boost_hold_time_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		if (expired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			cpu->hwp_boost_min = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	cpu->last_update = cpu->sample.time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 						      u64 time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	cpu->sample.time = time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		bool do_io = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		cpu->sched_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		 * Set iowait_boost flag and update time. Since IO WAIT flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		 * is set all the time, we can't just conclude that there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		 * some IO bound activity is scheduled on this CPU with just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		 * one occurrence. If we receive at least two in two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		 * consecutive ticks, then we treat as boost candidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			do_io = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		cpu->last_io_update = time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (do_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			intel_pstate_hwp_boost_up(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		intel_pstate_hwp_boost_down(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 						u64 time, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	cpu->sched_flags |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (smp_processor_id() == cpu->cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		intel_pstate_update_util_hwp_local(cpu, time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	struct sample *sample = &cpu->sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	u64 aperf, mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	u64 tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	rdmsrl(MSR_IA32_APERF, aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	rdmsrl(MSR_IA32_MPERF, mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	tsc = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	cpu->last_sample_time = cpu->sample.time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	cpu->sample.time = time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	cpu->sample.aperf = aperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	cpu->sample.mperf = mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	cpu->sample.tsc =  tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	cpu->sample.aperf -= cpu->prev_aperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	cpu->sample.mperf -= cpu->prev_mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	cpu->sample.tsc -= cpu->prev_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	cpu->prev_aperf = aperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	cpu->prev_mperf = mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	cpu->prev_tsc = tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	 * First time this function is invoked in a given cycle, all of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	 * previous sample data fields are equal to zero or stale and they must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	 * be populated with meaningful numbers for things to work, so assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	 * that sample.time will always be reset before setting the utilization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	 * update hook and make the caller skip the sample then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (cpu->last_sample_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		intel_pstate_calc_avg_perf(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static inline int32_t get_avg_frequency(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static inline int32_t get_avg_pstate(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	return mul_ext_fp(cpu->pstate.max_pstate_physical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			  cpu->sample.core_avg_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static inline int32_t get_target_pstate(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	struct sample *sample = &cpu->sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	int32_t busy_frac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	int target, avg_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			   sample->tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (busy_frac < cpu->iowait_boost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		busy_frac = cpu->iowait_boost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	sample->busy_scaled = busy_frac * 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	target = global.no_turbo || global.turbo_disabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	target += target >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	target = mul_fp(target, busy_frac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	if (target < cpu->pstate.min_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		target = cpu->pstate.min_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 * If the average P-state during the previous cycle was higher than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	 * current target, add 50% of the difference to the target to reduce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	 * possible performance oscillations and offset possible performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	 * loss related to moving the workload from one CPU to another within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	 * a package/module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	avg_pstate = get_avg_pstate(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	if (avg_pstate > target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		target += (avg_pstate - target) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	return target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	return clamp_t(int, pstate, min_pstate, max_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	if (pstate == cpu->pstate.current_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	cpu->pstate.current_pstate = pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) static void intel_pstate_adjust_pstate(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	int from = cpu->pstate.current_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	struct sample *sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	int target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	target_pstate = get_target_pstate(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	intel_pstate_update_pstate(cpu, target_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	sample = &cpu->sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		fp_toint(sample->busy_scaled),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		cpu->pstate.current_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		sample->mperf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		sample->aperf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		sample->tsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		get_avg_frequency(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		fp_toint(cpu->iowait_boost * 100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) static void intel_pstate_update_util(struct update_util_data *data, u64 time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 				     unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	u64 delta_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	/* Don't allow remote callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (smp_processor_id() != cpu->cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	delta_ns = time - cpu->last_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (flags & SCHED_CPUFREQ_IOWAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		/* Start over if the CPU may have been idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		if (delta_ns > TICK_NSEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			cpu->iowait_boost = ONE_EIGHTH_FP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			cpu->iowait_boost <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			if (cpu->iowait_boost > int_tofp(1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 				cpu->iowait_boost = int_tofp(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			cpu->iowait_boost = ONE_EIGHTH_FP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	} else if (cpu->iowait_boost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		/* Clear iowait_boost if the CPU may have been idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		if (delta_ns > TICK_NSEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			cpu->iowait_boost = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			cpu->iowait_boost >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	cpu->last_update = time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	delta_ns = time - cpu->sample.time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	if (intel_pstate_sample(cpu, time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		intel_pstate_adjust_pstate(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static struct pstate_funcs core_funcs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	.get_max = core_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	.get_max_physical = core_get_max_pstate_physical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	.get_min = core_get_min_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	.get_turbo = core_get_turbo_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	.get_scaling = core_get_scaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	.get_val = core_get_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static const struct pstate_funcs silvermont_funcs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	.get_max = atom_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	.get_max_physical = atom_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	.get_min = atom_get_min_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	.get_turbo = atom_get_turbo_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	.get_val = atom_get_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	.get_scaling = silvermont_get_scaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	.get_vid = atom_get_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static const struct pstate_funcs airmont_funcs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	.get_max = atom_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	.get_max_physical = atom_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	.get_min = atom_get_min_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	.get_turbo = atom_get_turbo_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	.get_val = atom_get_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	.get_scaling = airmont_get_scaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	.get_vid = atom_get_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static const struct pstate_funcs knl_funcs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	.get_max = core_get_max_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	.get_max_physical = core_get_max_pstate_physical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	.get_min = core_get_min_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	.get_turbo = knl_get_turbo_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	.get_scaling = core_get_scaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	.get_val = core_get_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) #define X86_MATCH(model, policy)					 \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 					   X86_FEATURE_APERFMPERF, &policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	X86_MATCH(SANDYBRIDGE,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	X86_MATCH(SANDYBRIDGE_X,	core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	X86_MATCH(ATOM_SILVERMONT,	silvermont_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	X86_MATCH(IVYBRIDGE,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	X86_MATCH(HASWELL,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	X86_MATCH(BROADWELL,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	X86_MATCH(IVYBRIDGE_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	X86_MATCH(HASWELL_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	X86_MATCH(HASWELL_L,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	X86_MATCH(HASWELL_G,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	X86_MATCH(BROADWELL_G,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	X86_MATCH(ATOM_AIRMONT,		airmont_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	X86_MATCH(SKYLAKE_L,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	X86_MATCH(BROADWELL_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	X86_MATCH(SKYLAKE,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	X86_MATCH(BROADWELL_D,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	X86_MATCH(XEON_PHI_KNL,		knl_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	X86_MATCH(XEON_PHI_KNM,		knl_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	X86_MATCH(ATOM_GOLDMONT,	core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	X86_MATCH(ATOM_GOLDMONT_PLUS,	core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	X86_MATCH(SKYLAKE_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	X86_MATCH(BROADWELL_D,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	X86_MATCH(BROADWELL_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	X86_MATCH(SKYLAKE_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	X86_MATCH(KABYLAKE,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	X86_MATCH(SKYLAKE_X,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	X86_MATCH(SKYLAKE,		core_funcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static int intel_pstate_init_cpu(unsigned int cpunum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	cpu = all_cpu_data[cpunum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (!cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		if (!cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		all_cpu_data[cpunum] = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		cpu->cpu = cpunum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		cpu->epp_default = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			const struct x86_cpu_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			intel_pstate_hwp_enable(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 			id = x86_match_cpu(intel_pstate_hwp_boost_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 			if (id && intel_pstate_acpi_pm_profile_server())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 				hwp_boost = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	} else if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		 * Re-enable HWP in case this happens after a resume from ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		 * S3 if the CPU was offline during the whole system/resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		 * cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		intel_pstate_hwp_reenable(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	cpu->epp_powersave = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	cpu->epp_policy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	intel_pstate_get_cpu_pstates(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	pr_debug("controlling: cpu %d\n", cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	struct cpudata *cpu = all_cpu_data[cpu_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (hwp_active && !hwp_boost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	if (cpu->update_util_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	/* Prevent intel_pstate_update_util() from using stale data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	cpu->sample.time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 				     (hwp_active ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				      intel_pstate_update_util_hwp :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 				      intel_pstate_update_util));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	cpu->update_util_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static void intel_pstate_clear_update_util_hook(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	struct cpudata *cpu_data = all_cpu_data[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	if (!cpu_data->update_util_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	cpufreq_remove_update_util_hook(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	cpu_data->update_util_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) static int intel_pstate_get_max_freq(struct cpudata *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	return global.turbo_disabled || global.no_turbo ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static void intel_pstate_update_perf_limits(struct cpudata *cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 					    unsigned int policy_min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 					    unsigned int policy_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	int32_t max_policy_perf, min_policy_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	int max_state, turbo_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	int max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	 * HWP needs some special consideration, because on BDX the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	 * HWP_REQUEST uses abstract value to represent performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	 * rather than pure ratios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		max_state = global.no_turbo || global.turbo_disabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		turbo_max = cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	max_freq = max_state * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	max_policy_perf = max_state * policy_max / max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	if (policy_max == policy_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		min_policy_perf = max_policy_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		min_policy_perf = max_state * policy_min / max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 					  0, max_policy_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		 cpu->cpu, max_state, min_policy_perf, max_policy_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	/* Normalize user input to [min_perf, max_perf] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	if (per_cpu_limits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		cpu->min_perf_ratio = min_policy_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		cpu->max_perf_ratio = max_policy_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		int32_t global_min, global_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		/* Global limits are in percent of the maximum turbo P-state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		global_min = clamp_t(int32_t, global_min, 0, global_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			 global_min, global_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		cpu->max_perf_ratio = min(max_policy_perf, global_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		/* Make sure min_perf <= max_perf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 					  cpu->max_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		 cpu->max_perf_ratio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		 cpu->min_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static int intel_pstate_set_policy(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	if (!policy->cpuinfo.max_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		 policy->cpuinfo.max_freq, policy->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	cpu->policy = policy->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	mutex_lock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		 * NOHZ_FULL CPUs need this as the governor callback may not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		 * be invoked on them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		intel_pstate_clear_update_util_hook(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		intel_pstate_max_within_limits(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		intel_pstate_set_update_util_hook(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		 * When hwp_boost was active before and dynamically it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		 * was turned off, in that case we need to clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		 * update util hook.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		if (!hwp_boost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			intel_pstate_clear_update_util_hook(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		intel_pstate_hwp_set(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	mutex_unlock(&intel_pstate_limits_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 					   struct cpufreq_policy_data *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	if (!hwp_active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	    policy->max < policy->cpuinfo.max_freq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	    policy->max > cpu->pstate.max_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		pr_debug("policy->max > max non turbo frequency\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		policy->max = policy->cpuinfo.max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 					   struct cpufreq_policy_data *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	int max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		int max_state, turbo_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		max_freq = max_state * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		max_freq = intel_pstate_get_max_freq(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	intel_pstate_adjust_policy_max(cpu, policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	pr_debug("CPU %d going offline\n", cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	if (cpu->suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 * If the CPU is an SMT thread and it goes offline with the performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 * settings different from the minimum, it will prevent its sibling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * from getting to lower performance levels, so force the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 * performance on CPU offline to prevent that from happening.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (hwp_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		intel_pstate_hwp_offline(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		intel_pstate_set_min_pstate(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	intel_pstate_exit_perf_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	pr_debug("CPU %d going online\n", cpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	intel_pstate_init_acpi_perf_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		 * Re-enable HWP and clear the "suspended" flag to let "resume"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		 * know that it need not do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		intel_pstate_hwp_reenable(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		cpu->suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	pr_debug("CPU %d stopping\n", policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	intel_pstate_clear_update_util_hook(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	pr_debug("CPU %d exiting\n", policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	policy->fast_switch_possible = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	rc = intel_pstate_init_cpu(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	cpu->max_perf_ratio = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	cpu->min_perf_ratio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	/* cpuinfo and default policy values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	global.turbo_disabled_mf = global.turbo_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	policy->cpuinfo.max_freq = global.turbo_disabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		unsigned int max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		max_freq = global.turbo_disabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		if (max_freq < policy->cpuinfo.max_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			policy->cpuinfo.max_freq = max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	intel_pstate_init_acpi_perf_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	policy->fast_switch_possible = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	int ret = __intel_pstate_cpu_init(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	 * Set the policy to powersave to provide a valid fallback value in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	 * the default cpufreq governor is neither powersave nor performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	policy->policy = CPUFREQ_POLICY_POWERSAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) static struct cpufreq_driver intel_pstate = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	.flags		= CPUFREQ_CONST_LOOPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	.verify		= intel_pstate_verify_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	.setpolicy	= intel_pstate_set_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	.suspend	= intel_pstate_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	.resume		= intel_pstate_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	.init		= intel_pstate_cpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	.exit		= intel_pstate_cpu_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	.stop_cpu	= intel_pstate_stop_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	.offline	= intel_pstate_cpu_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	.online		= intel_pstate_cpu_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	.update_limits	= intel_pstate_update_limits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	.name		= "intel_pstate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	intel_pstate_verify_cpu_policy(cpu, policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Use of trace in passive mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)  * In passive mode the trace core_busy field (also known as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)  * performance field, and lablelled as such on the graphs; also known as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  * core_avg_perf) is not needed and so is re-assigned to indicate if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * driver call was via the normal or fast switch path. Various graphs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  * output from the intel_pstate_tracer.py utility that include core_busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  * so we use 10 to indicate the the normal path through the driver, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)  * 90 to indicate the fast switch path through the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)  * The scaled_busy field is not used, and is set to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) #define	INTEL_PSTATE_TRACE_TARGET 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) #define	INTEL_PSTATE_TRACE_FAST_SWITCH 90
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	struct sample *sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	if (!trace_pstate_sample_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	if (!intel_pstate_sample(cpu, ktime_get()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	sample = &cpu->sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	trace_pstate_sample(trace_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		old_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		cpu->pstate.current_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		sample->mperf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		sample->aperf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		sample->tsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		get_avg_frequency(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		fp_toint(cpu->iowait_boost * 100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 				     bool strict, bool fast_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	value &= ~HWP_MIN_PERF(~0L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	value |= HWP_MIN_PERF(target_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	 * The entire MSR needs to be updated in order to update the HWP min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	 * field in it, so opportunistically update the max too if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	value &= ~HWP_MAX_PERF(~0L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (value == prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	WRITE_ONCE(cpu->hwp_req_cached, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	if (fast_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		wrmsrl(MSR_HWP_REQUEST, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 					  u32 target_pstate, bool fast_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (fast_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		wrmsrl(MSR_IA32_PERF_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		       pstate_funcs.get_val(cpu, target_pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			      pstate_funcs.get_val(cpu, target_pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 				       int target_pstate, bool fast_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	int old_pstate = cpu->pstate.current_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		intel_cpufreq_adjust_hwp(cpu, target_pstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 					 policy->strict_target, fast_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		cpu->pstate.current_pstate = target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	} else if (target_pstate != old_pstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		cpu->pstate.current_pstate = target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 			    INTEL_PSTATE_TRACE_TARGET, old_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	return target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) static int intel_cpufreq_target(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 				unsigned int target_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 				unsigned int relation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	struct cpufreq_freqs freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	int target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	freqs.old = policy->cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	freqs.new = target_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	cpufreq_freq_transition_begin(policy, &freqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	switch (relation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	case CPUFREQ_RELATION_L:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	case CPUFREQ_RELATION_H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		target_pstate = freqs.new / cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	freqs.new = target_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	cpufreq_freq_transition_end(policy, &freqs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 					      unsigned int target_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	struct cpudata *cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	int target_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	update_turbo_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	return target_pstate * cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	int max_state, turbo_max, min_freq, max_freq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	struct freq_qos_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	struct cpudata *cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	dev = get_cpu_device(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	ret = __intel_pstate_cpu_init(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	policy->cur = policy->cpuinfo.min_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		goto pstate_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	cpu = all_cpu_data[policy->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		WRITE_ONCE(cpu->hwp_req_cached, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		turbo_max = cpu->pstate.turbo_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	min_freq *= cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	max_freq *= cpu->pstate.scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 				   min_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		goto free_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 				   max_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		goto remove_min_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	policy->driver_data = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) remove_min_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	freq_qos_remove_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) free_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) pstate_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	intel_pstate_exit_perf_limits(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	struct freq_qos_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	req = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	freq_qos_remove_request(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	freq_qos_remove_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	return intel_pstate_cpu_exit(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) static struct cpufreq_driver intel_cpufreq = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	.flags		= CPUFREQ_CONST_LOOPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	.verify		= intel_cpufreq_verify_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	.target		= intel_cpufreq_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	.fast_switch	= intel_cpufreq_fast_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	.init		= intel_cpufreq_cpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	.exit		= intel_cpufreq_cpu_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	.offline	= intel_pstate_cpu_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	.online		= intel_pstate_cpu_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	.suspend	= intel_pstate_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	.resume		= intel_pstate_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	.update_limits	= intel_pstate_update_limits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	.name		= "intel_cpufreq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static struct cpufreq_driver *default_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) static void intel_pstate_driver_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		if (all_cpu_data[cpu]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			if (intel_pstate_driver == &intel_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 				intel_pstate_clear_update_util_hook(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			kfree(all_cpu_data[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 			all_cpu_data[cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	intel_pstate_driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) static int intel_pstate_register_driver(struct cpufreq_driver *driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	if (driver == &intel_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		intel_pstate_sysfs_expose_hwp_dynamic_boost();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	memset(&global, 0, sizeof(global));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	global.max_perf_pct = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	intel_pstate_driver = driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	ret = cpufreq_register_driver(intel_pstate_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		intel_pstate_driver_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	global.min_perf_pct = min_perf_pct_min();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) static ssize_t intel_pstate_show_status(char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	if (!intel_pstate_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		return sprintf(buf, "off\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 					"active" : "passive");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) static int intel_pstate_update_status(const char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	if (size == 3 && !strncmp(buf, "off", size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		if (!intel_pstate_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		if (hwp_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 		cpufreq_unregister_driver(intel_pstate_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		intel_pstate_driver_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	if (size == 6 && !strncmp(buf, "active", size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		if (intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 			if (intel_pstate_driver == &intel_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			cpufreq_unregister_driver(intel_pstate_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		return intel_pstate_register_driver(&intel_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	if (size == 7 && !strncmp(buf, "passive", size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		if (intel_pstate_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			if (intel_pstate_driver == &intel_cpufreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			cpufreq_unregister_driver(intel_pstate_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 			intel_pstate_sysfs_hide_hwp_dynamic_boost();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		return intel_pstate_register_driver(&intel_cpufreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) static int no_load __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static int no_hwp __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) static int hwp_only __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) static unsigned int force_load __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) static int __init intel_pstate_msrs_not_valid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	if (!pstate_funcs.get_max() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	    !pstate_funcs.get_min() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	    !pstate_funcs.get_turbo())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	pstate_funcs.get_max   = funcs->get_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	pstate_funcs.get_max_physical = funcs->get_max_physical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	pstate_funcs.get_min   = funcs->get_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	pstate_funcs.get_turbo = funcs->get_turbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	pstate_funcs.get_scaling = funcs->get_scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	pstate_funcs.get_val   = funcs->get_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	pstate_funcs.get_vid   = funcs->get_vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) static bool __init intel_pstate_no_acpi_pss(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		union acpi_object *pss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		struct acpi_processor *pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		pss = buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 			kfree(pss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		kfree(pss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	pr_debug("ACPI _PSS not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) static bool __init intel_pstate_no_acpi_pcch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	acpi_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	status = acpi_get_handle(NULL, "\\_SB", &handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	if (acpi_has_method(handle, "PCCH"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	pr_debug("ACPI PCCH not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) static bool __init intel_pstate_has_acpi_ppc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		struct acpi_processor *pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		if (acpi_has_method(pr->handle, "_PPC"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	pr_debug("ACPI _PPC not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	PSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	PPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) /* Hardware vendor-specific info that has its own power management modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) static struct acpi_platform_list plat_info[] __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	{ } /* End */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) #define BITMASK_OOB	(BIT(8) | BIT(18))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	const struct x86_cpu_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	u64 misc_pwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	if (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		if (misc_pwr & BITMASK_OOB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	idx = acpi_match_platform_list(plat_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	switch (plat_info[idx].data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	case PSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		if (!intel_pstate_no_acpi_pss())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		return intel_pstate_no_acpi_pcch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	case PPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		return intel_pstate_has_acpi_ppc() && !force_load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) static void intel_pstate_request_control_from_smm(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	 * It may be unsafe to request P-states control from SMM if _PPC support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	 * has not been enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	if (acpi_ppc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		acpi_processor_pstate_control();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) #else /* CONFIG_ACPI not enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static inline void intel_pstate_request_control_from_smm(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) #endif /* CONFIG_ACPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) #define INTEL_PSTATE_HWP_BROADWELL	0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) #define X86_MATCH_HWP(model, hwp_mode)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 					   X86_FEATURE_HWP, hwp_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static const struct x86_cpu_id hwp_support_ids[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	X86_MATCH_HWP(BROADWELL_X,	INTEL_PSTATE_HWP_BROADWELL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	X86_MATCH_HWP(BROADWELL_D,	INTEL_PSTATE_HWP_BROADWELL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	X86_MATCH_HWP(ANY,		0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) static bool intel_pstate_hwp_is_enabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	rdmsrl(MSR_PM_ENABLE, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	return !!(value & 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) static int __init intel_pstate_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	const struct x86_cpu_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	id = x86_match_cpu(hwp_support_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	if (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		bool hwp_forced = intel_pstate_hwp_is_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		if (hwp_forced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 			pr_info("HWP enabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		else if (no_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		copy_cpu_funcs(&core_funcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		 * Avoid enabling HWP for processors without EPP support,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		 * because that means incomplete HWP implementation which is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		 * corner case and supporting it is generally problematic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		 * If HWP is enabled already, though, there is no choice but to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		 * deal with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 			hwp_active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 			hwp_mode_bdw = id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 			intel_pstate.attr = hwp_cpufreq_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 			intel_cpufreq.attr = hwp_cpufreq_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			if (!default_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 				default_driver = &intel_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 			goto hwp_cpu_matched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 		pr_info("HWP not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 		if (no_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 		id = x86_match_cpu(intel_pstate_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 		if (!id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 			pr_info("CPU model not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	if (intel_pstate_msrs_not_valid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		pr_info("Invalid MSRs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	/* Without HWP start in the passive mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	if (!default_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		default_driver = &intel_cpufreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) hwp_cpu_matched:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	 * The Intel pstate driver will be ignored if the platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	 * firmware has its own power management modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	if (intel_pstate_platform_pwr_mgmt_exists()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		pr_info("P-states controlled by the platform\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	if (!hwp_active && hwp_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	pr_info("Intel P-state driver initializing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	if (!all_cpu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	intel_pstate_request_control_from_smm();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	intel_pstate_sysfs_expose_params();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	mutex_lock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	rc = intel_pstate_register_driver(default_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	mutex_unlock(&intel_pstate_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 		intel_pstate_sysfs_remove();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	if (hwp_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		const struct x86_cpu_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		if (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 			set_power_ctl_ee_state(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 			pr_info("Disabling energy efficiency optimization\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		pr_info("HWP enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) device_initcall(intel_pstate_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) static int __init intel_pstate_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	if (!strcmp(str, "disable"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		no_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	else if (!strcmp(str, "active"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		default_driver = &intel_pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	else if (!strcmp(str, "passive"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		default_driver = &intel_cpufreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	if (!strcmp(str, "no_hwp"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		no_hwp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	if (!strcmp(str, "force"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		force_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	if (!strcmp(str, "hwp_only"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		hwp_only = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	if (!strcmp(str, "per_cpu_perf_limits"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		per_cpu_limits = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	if (!strcmp(str, "support_acpi_ppc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		acpi_ppc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) early_param("intel_pstate", intel_pstate_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) MODULE_LICENSE("GPL");