Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * acpi-cpufreq.c - ACPI Processor P-States Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <acpi/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <acpi/cppc_acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/msr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) MODULE_DESCRIPTION("ACPI Processor P-States Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
/* How P-state control/status registers are accessed on this system. */
enum {
	UNDEFINED_CAPABLE = 0,		/* not yet determined */
	SYSTEM_INTEL_MSR_CAPABLE,	/* Intel PERF_CTL/PERF_STATUS MSRs */
	SYSTEM_AMD_MSR_CAPABLE,		/* AMD/Hygon PERF_CTL MSR */
	SYSTEM_IO_CAPABLE,		/* ACPI system-I/O port access */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
/* Mask of the PERF_CTL MSR bits that select the target P-state. */
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

/* MSR_K7_HWCR bit: when set, Core Performance Boost is disabled. */
#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
/* Per-policy driver state, stored in policy->driver_data. */
struct acpi_cpufreq_data {
	unsigned int resume;		/* force a write on next target call */
	unsigned int cpu_feature;	/* one of the SYSTEM_*_CAPABLE values */
	unsigned int acpi_perf_cpu;	/* CPU owning the ACPI perf data */
	cpumask_var_t freqdomain_cpus;	/* CPUs in the same frequency domain */
	/* register accessors, chosen per cpu_feature at init time */
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

/*
 * Return the ACPI performance data for the CPU this policy's data is
 * anchored to (data->acpi_perf_cpu), not necessarily the current CPU.
 */
static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
static struct cpufreq_driver acpi_cpufreq_driver;

/* When nonzero, verify each frequency transition by re-reading hardware. */
static unsigned int acpi_pstate_strict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
/*
 * Read @cpu's boost-control MSR and report whether boost is enabled.
 * Both vendors use a "disable" bit, so the result is the inverted bit.
 * Unknown vendors report false.
 */
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/* Turbo is on unless the TURBO_DISABLE bit is set. */
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		/* CPB is on unless the CPB_DIS bit is set. */
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) static int boost_set_msr(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	u32 msr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	u64 msr_mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	switch (boot_cpu_data.x86_vendor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	case X86_VENDOR_INTEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 		msr_addr = MSR_IA32_MISC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	case X86_VENDOR_HYGON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	case X86_VENDOR_AMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		msr_addr = MSR_K7_HWCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		msr_mask = MSR_K7_HWCR_CPB_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	rdmsrl(msr_addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 		val &= ~msr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 		val |= msr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	wrmsrl(msr_addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) static void boost_set_msr_each(void *p_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	bool enable = (bool) p_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	boost_set_msr(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
/*
 * Enable/disable boost on every CPU in the policy by programming the
 * boost MSR on each of them (IPI with wait=1, so it is synchronous).
 * Always reports success.
 */
static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	if (unlikely(!data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) cpufreq_freq_attr_ro(freqdomain_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 			 size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	unsigned int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	if (!acpi_cpufreq_driver.set_boost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	ret = kstrtouint(buf, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (ret || val > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	set_boost(policy, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
/* sysfs show: report the driver-wide boost state as "0" or "1". */
static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) cpufreq_freq_attr_rw(cpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) static int check_est_cpu(unsigned int cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	return cpu_has(cpu, X86_FEATURE_EST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) static int check_amd_hwpstate_cpu(unsigned int cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	struct acpi_processor_performance *perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	perf = to_perf_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	for (i = 0; i < perf->state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		if (value == perf->states[i].status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 			return policy->freq_table[i].frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	struct cpufreq_frequency_table *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	struct acpi_processor_performance *perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		msr &= AMD_MSR_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 		msr &= HYGON_MSR_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		msr &= INTEL_MSR_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	perf = to_perf_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	cpufreq_for_each_entry(pos, policy->freq_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		if (msr == perf->states[pos->driver_data].status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 			return pos->frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	return policy->freq_table[0].frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	switch (data->cpu_feature) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	case SYSTEM_INTEL_MSR_CAPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	case SYSTEM_AMD_MSR_CAPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		return extract_msr(policy, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	case SYSTEM_IO_CAPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		return extract_io(policy, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
/* Read the low 32 bits of Intel PERF_CTL on the current CPU. */
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/*
 * Write the P-state selector bits of Intel PERF_CTL on the current CPU,
 * via read-modify-write so the non-selector bits are preserved.
 */
static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
/* Read the low 32 bits of AMD PERF_CTL on the current CPU. */
static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
/* Write AMD PERF_CTL on the current CPU (high half cleared). */
static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
/* Read the P-state status via the ACPI-described system I/O port. */
static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 
/* Write the P-state control value via the ACPI-described I/O port. */
static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
/*
 * Command packet passed to the IPI helpers: which register to touch,
 * the value read or to be written, and the accessor to invoke.  Only
 * one of func.read/func.write is meaningful per command.
 */
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	/* Store the result back into the command for the caller to pick up. */
	cmd->val = cmd->func.read(cmd->reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
/*
 * Read the control register on any one CPU of @mask (the current CPU is
 * preferred if it is in the mask) and return the raw value.
 */
static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
/*
 * Write @val to the control register on every CPU in @mask.
 * smp_call_function_many() skips the calling CPU, so when the current
 * CPU is in the mask it is handled by a direct call first; preemption
 * is disabled (get_cpu/put_cpu) across both steps so the current CPU
 * cannot change underneath us.
 */
static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	/* wait=1: do not return until all remote writes have completed. */
	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
/*
 * Read the raw control-register value from one CPU of @mask.
 * An empty mask yields 0 rather than an IPI to nowhere.
 */
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
/*
 * cpufreq ->get() callback: read the hardware and return @cpu's current
 * frequency in kHz (0 when the policy/driver data is not available).
 * If the hardware disagrees with the cached P-state, flag data->resume
 * so the next target call rewrites the register unconditionally.
 */
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
/*
 * Poll the hardware until it reports @freq, re-reading up to 100 times
 * with 10us pauses (~1ms total).  Returns 1 when the frequency was
 * observed, 0 on timeout.  Used only in acpi_pstate_strict mode.
 */
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
/*
 * cpufreq ->target_index() callback: switch the policy's CPUs to the
 * P-state backing freq_table[@index].
 *
 * Short-circuits when the cached state already matches, unless
 * data->resume is set (after resume, or after a BIOS change detected in
 * get_cur_freq_on_cpu()), in which case the register is rewritten
 * anyway.  Returns 0 on success, -ENODEV without driver data, or
 * -EAGAIN when strict verification times out.
 */
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	/* freq_table entries map back to perf states via driver_data. */
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		/* Optionally verify the transition actually took effect. */
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	/* Only cache the new state once the switch is known to be good. */
	if (!result)
		perf->state = next_perf_state;

	return result;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 					     unsigned int target_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	struct acpi_processor_performance *perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	struct cpufreq_frequency_table *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	unsigned int next_perf_state, next_freq, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	 * Find the closest frequency above target_freq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	if (policy->cached_target_freq == target_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		index = policy->cached_resolved_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		index = cpufreq_table_find_index_dl(policy, target_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	entry = &policy->freq_table[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	next_freq = entry->frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	next_perf_state = entry->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	perf = to_perf_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	if (perf->state == next_perf_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		if (unlikely(data->resume))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 			data->resume = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 			return next_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	data->cpu_freq_write(&perf->control_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 			     perf->states[next_perf_state].control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	perf->state = next_perf_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	return next_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	struct acpi_processor_performance *perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	perf = to_perf_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	if (cpu_khz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		/* search the closest match to cpu_khz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		unsigned long freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		unsigned long freqn = perf->states[0].core_frequency * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		for (i = 0; i < (perf->state_count-1); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 			freq = freqn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			freqn = perf->states[i+1].core_frequency * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			if ((2 * cpu_khz) > (freqn + freq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 				perf->state = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 				return freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		perf->state = perf->state_count-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		return freqn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		/* assume CPU is at P0... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		perf->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		return perf->states[0].core_frequency * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) static void free_acpi_perf_data(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	for_each_possible_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 				 ->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	free_percpu(acpi_perf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
/*
 * CPU hotplug "online" callback: propagate the driver-wide boost setting
 * to the CPU that just came up, so its per-CPU state matches the global one.
 */
static int cpufreq_boost_online(unsigned int cpu)
{
	/*
	 * On the CPU_UP path we simply keep the boost-disable flag
	 * in sync with the current global state.
	 */
	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
/*
 * CPU hotplug "offline prepare" callback: re-enable boost on the departing
 * CPU regardless of the global setting (always passes 1 to boost_set_msr()).
 */
static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552)  * acpi_cpufreq_early_init - initialize ACPI P-States library
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554)  * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555)  * in order to determine correct frequency and voltage pairings. We can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556)  * do _PDC and _PSD and find out the processor dependency for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557)  * actual init that will happen later...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) static int __init acpi_cpufreq_early_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	if (!acpi_perf_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		pr_debug("Memory allocation error for acpi_perf_data.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		if (!zalloc_cpumask_var_node(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			GFP_KERNEL, cpu_to_node(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			free_acpi_perf_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	/* Do initialization in ACPI core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	acpi_processor_preregister_performance(acpi_perf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  * Some BIOSes do SW_ANY coordination internally, either set it up in hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  * or do it in BIOS firmware and won't inform about it to OS. If not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  * detected, this has a side effect of making CPU run at a different speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  * than OS intended it to run at. Detect it and handle it cleanly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) static int bios_with_sw_any_bug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
/* DMI match callback: flag that this machine has the SW_ANY BIOS bug. */
static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
/*
 * Machines known to do SW_ANY coordination in BIOS/hardware without
 * reporting it; matched at policy init to widen policy->cpus.
 */
static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }	/* terminator */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	/* Intel Xeon Processor 7100 Series Specification Update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	 * AL30: A Machine Check Exception (MCE) Occurring during an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	 * Both Processor Cores to Lock Up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (c->x86_vendor == X86_VENDOR_INTEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		if ((c->x86 == 15) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		    (c->x86_model == 6) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		    (c->x86_stepping == 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		    }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) #ifdef CONFIG_ACPI_CPPC_LIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) static u64 get_max_boost_ratio(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	struct cppc_perf_caps perf_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	u64 highest_perf, nominal_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (acpi_pstate_strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	ret = cppc_get_perf_caps(cpu, &perf_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			 cpu, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	highest_perf = perf_caps.highest_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	nominal_perf = perf_caps.nominal_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	if (!highest_perf || !nominal_perf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	if (highest_perf < nominal_perf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		pr_debug("CPU%d: nominal performance above highest\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
/*
 * Per-policy init: register with the ACPI performance library, work out
 * how P-state transitions are performed on this CPU (IO ports vs MSRs),
 * build the cpufreq frequency table and set up fast-switch support.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto-unwind path at the bottom.
 */
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
#ifdef CONFIG_SMP
	/* Cached across calls so the errata check runs only once. */
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	/* The full _PSD domain is always exported via freqdomain_cpus. */
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	/* Widen the policy on BIOSes that coordinate SW_ANY silently. */
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	/*
	 * Pre-family-0x19 AMD hw-pstate CPUs: ignore the BIOS _PSD domain
	 * and run each CPU independently (hardware coordinates).
	 */
	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	/* Mixed control/status address spaces are not supported. */
	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	/* Pick the read/write accessors matching the control register. */
	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	/* One extra slot for the CPUFREQ_TABLE_END terminator. */
	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/*
	 * table init: copy P-states in their ACPI (descending) order,
	 * skipping any state whose frequency is not strictly below the
	 * previously accepted one (drops duplicates/out-of-order entries).
	 */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu);
	if (max_boost_ratio) {
		unsigned int freq = freq_table[0].frequency;

		/*
		 * Because the loop above sorts the freq_table entries in the
		 * descending order, freq is the maximum frequency in the table.
		 * Assume that it corresponds to the CPPC nominal frequency and
		 * use it to set cpuinfo.max_freq.
		 */
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	/* Fast switching needs per-CPU (or SW_ANY) control without strict checks. */
	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	policy->fast_switch_possible = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	policy->driver_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	acpi_processor_unregister_performance(data->acpi_perf_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	free_cpumask_var(data->freqdomain_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	kfree(policy->freq_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 							      policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	unsigned int freq = policy->freq_table[0].frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (perf->states[0].core_frequency * 1000 != freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct acpi_cpufreq_data *data = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	data->resume = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static struct freq_attr *acpi_cpufreq_attr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	&cpufreq_freq_attr_scaling_available_freqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	&freqdomain_cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	&cpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static struct cpufreq_driver acpi_cpufreq_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	.verify		= cpufreq_generic_frequency_table_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	.target_index	= acpi_cpufreq_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	.fast_switch	= acpi_cpufreq_fast_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	.bios_limit	= acpi_processor_get_bios_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	.init		= acpi_cpufreq_cpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	.exit		= acpi_cpufreq_cpu_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	.ready		= acpi_cpufreq_cpu_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	.resume		= acpi_cpufreq_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	.name		= "acpi-cpufreq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	.attr		= acpi_cpufreq_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static enum cpuhp_state acpi_cpufreq_online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void __init acpi_cpufreq_boost_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		pr_debug("Boost capabilities not present in the processor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	acpi_cpufreq_driver.set_boost = set_boost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	acpi_cpufreq_driver.boost_enabled = boost_state(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	 * This calls the online callback on all online cpu and forces all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 * MSRs to the same value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				cpufreq_boost_online, cpufreq_boost_down_prep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	acpi_cpufreq_online = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static void acpi_cpufreq_boost_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (acpi_cpufreq_online > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) static int __init acpi_cpufreq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (acpi_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* don't keep reloading if cpufreq_driver exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (cpufreq_get_current_driver())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	ret = acpi_cpufreq_early_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* this is a sysfs file with a strange name and an even stranger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * semantic - per CPU instantiation, but system global effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * Lets enable it only on AMD CPUs for compatibility reasons and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 * only if configured. This is considered legacy code, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * will probably be removed at some point in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (!check_amd_hwpstate_cpu(0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		struct freq_attr **attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		pr_debug("CPB unsupported, do not expose it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		for (attr = acpi_cpufreq_attr; *attr; attr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			if (*attr == &cpb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				*attr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	acpi_cpufreq_boost_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		free_acpi_perf_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		acpi_cpufreq_boost_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static void __exit acpi_cpufreq_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	acpi_cpufreq_boost_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	free_acpi_perf_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) module_param(acpi_pstate_strict, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) MODULE_PARM_DESC(acpi_pstate_strict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	"value 0 or non-zero. non-zero -> strict ACPI checks are "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	"performed during frequency changes.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) late_initcall(acpi_cpufreq_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) module_exit(acpi_cpufreq_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	{ACPI_PROCESSOR_OBJECT_HID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	{ACPI_PROCESSOR_DEVICE_HID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) MODULE_DEVICE_TABLE(acpi, processor_device_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) MODULE_ALIAS("acpi");