Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * processor_throttling.c - Throttling submodule of the ACPI processor driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *  			- Added processor hotplug support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <acpi/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #define PREFIX "ACPI: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #define ACPI_PROCESSOR_CLASS            "processor"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) ACPI_MODULE_NAME("processor_throttling");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) /* ignore_tpc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  *  0 -> acpi processor driver doesn't ignore _TPC values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *  1 -> acpi processor driver ignores _TPC values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) static int ignore_tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) module_param(ignore_tpc, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
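/*
 * Usage sketch (an assumption, not shown in this excerpt): in mainline
 * kernels this file is built into the "processor" module, so ignore_tpc
 * would be set either on the kernel command line or, thanks to the 0644
 * permission above, at runtime through sysfs:
 *
 *	processor.ignore_tpc=1                                (boot parameter)
 *	echo 1 > /sys/module/processor/parameters/ignore_tpc  (runtime)
 */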
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) struct throttling_tstate {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	unsigned int cpu;		/* cpu nr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 	int target_state;		/* target T-state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) struct acpi_processor_throttling_arg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 	struct acpi_processor *pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	int target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	bool force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define THROTTLING_PRECHANGE       (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define THROTTLING_POSTCHANGE      (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
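/*
 * How these pieces fit together (a sketch based on the set-throttling path
 * later in this file, which is not shown in this excerpt): the code that
 * switches T-states fills one struct throttling_tstate per affected CPU and
 * calls the notifier around the change, roughly:
 *
 *	struct throttling_tstate t_state = {
 *		.cpu = cpu,
 *		.target_state = state,
 *	};
 *	acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, &t_state);
 *	// ... program the new T-state ...
 *	acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, &t_state);
 */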
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) static int acpi_processor_get_throttling(struct acpi_processor *pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) static int __acpi_processor_set_throttling(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 					   int state, bool force, bool direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) static int acpi_processor_update_tsd_coord(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	int count, count_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	cpumask_var_t covered_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	struct acpi_processor *pr, *match_pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	struct acpi_tsd_package *pdomain, *match_pdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	 * Now that we have _TSD data from all CPUs, let's set up T-state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	 * coordination between all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 		pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 		if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 		/* Basic validity check for domain info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		pthrottling = &(pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 		 * If the _TSD package for any one CPU is invalid, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 		 * coordination among all CPUs is treated as invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 		 * Admittedly crude, but it keeps the logic simple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 		if (!pthrottling->tsd_valid_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 			retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		goto err_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 		pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 		if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 		if (cpumask_test_cpu(i, covered_cpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 		pthrottling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 		pdomain = &(pthrottling->domain_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		cpumask_set_cpu(i, covered_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		 * If the number of processors in the _TSD domain is 1, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		 * unnecessary to parse the coordination for this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		if (pdomain->num_processors <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		/* Validate the Domain info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		count_target = pdomain->num_processors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		for_each_possible_cpu(j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 			if (i == j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 			match_pr = per_cpu(processors, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 			if (!match_pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 			match_pthrottling = &(match_pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 			match_pdomain = &(match_pthrottling->domain_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 			if (match_pdomain->domain != pdomain->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 			/* Here i and j are in the same domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 			 * If two TSD packages have the same domain, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 			 * should have the same num_processors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 			 * coordination type. Otherwise the domain info is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 			 * regarded as invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 			if (match_pdomain->num_processors != count_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 				retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 				goto err_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 			if (pdomain->coord_type != match_pdomain->coord_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 				retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 				goto err_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 			cpumask_set_cpu(j, covered_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 		for_each_possible_cpu(j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 			if (i == j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 			match_pr = per_cpu(processors, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 			if (!match_pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 			match_pthrottling = &(match_pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 			match_pdomain = &(match_pthrottling->domain_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 			if (match_pdomain->domain != pdomain->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 			 * If some CPUs share the same domain, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 			 * will have the same shared_cpu_map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 			cpumask_copy(match_pthrottling->shared_cpu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 				     pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) err_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	free_cpumask_var(covered_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		 * Assume no coordination on any error parsing domain info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		 * The coordination type will be forced as SW_ALL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 			pthrottling = &(pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 			cpumask_clear(pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  * Update the T-state coordination after the _TSD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  * data for all CPUs has been obtained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) void acpi_processor_throttling_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	if (acpi_processor_update_tsd_coord()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 			"Assume no T-state coordination\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) static int acpi_processor_throttling_notifier(unsigned long event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	struct throttling_tstate *p_tstate = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	struct acpi_processor *pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	int target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	struct acpi_processor_limit *p_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	struct acpi_processor_throttling *p_throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	cpu = p_tstate->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	pr = per_cpu(processors, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	if (!pr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	if (!pr->flags.throttling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 				"unsupported on CPU %d\n", cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	target_state = p_tstate->target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	p_throttling = &(pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	case THROTTLING_PRECHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		 * The prechange event is used to choose a proper T-state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		 * that meets the thermal, user and _TPC limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		p_limit = &pr->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		if (p_limit->thermal.tx > target_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 			target_state = p_limit->thermal.tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		if (p_limit->user.tx > target_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 			target_state = p_limit->user.tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		if (pr->throttling_platform_limit > target_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 			target_state = pr->throttling_platform_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		if (target_state >= p_throttling->state_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 			printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 				"Exceeds the limit of T-state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 			target_state = p_throttling->state_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		p_tstate->target_state = target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 				"target T-state of CPU %d is T%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 				cpu, target_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	case THROTTLING_POSTCHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		 * The postchange event only updates the current T-state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		 * recorded in struct acpi_processor_throttling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		p_throttling->state = target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 				"CPU %d is switched to T%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 				cpu, target_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 			"Unsupported Throttling notifier event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273)  * _TPC - Throttling Present Capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	acpi_status status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	unsigned long long tpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	if (ignore_tpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 		if (status != AE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	pr->throttling_platform_limit = (int)tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) }
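/*
 * For reference (illustrative, not taken from this platform's firmware):
 * _TPC simply returns the index of the highest-performance T-state OSPM is
 * currently allowed to use, e.g. in ASL:
 *
 *	Method (_TPC, 0, NotSerialized)
 *	{
 *		Return (2)	// restrict OSPM to T2 or a deeper throttle state
 *	}
 *
 * A value of 0 imposes no extra limit, which matches the default used above
 * when _TPC is absent or ignore_tpc is set.
 */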
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	int throttling_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	int current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	struct acpi_processor_limit *limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	int target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	if (ignore_tpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	result = acpi_processor_get_platform_limit(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		/* Throttling Limit is unsupported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	throttling_limit = pr->throttling_platform_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	if (throttling_limit >= pr->throttling.state_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 		/* Incorrect throttling limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	current_state = pr->throttling.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	if (current_state > throttling_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		 * The current state already satisfies the _TPC limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		 * Still, it is reasonable for OSPM to move from a higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 		 * to a lower T-state for better performance, as long as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 		 * the thermal and user limit conditions are also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 		 * respected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		limit = &pr->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 		target_state = throttling_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		if (limit->thermal.tx > target_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 			target_state = limit->thermal.tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		if (limit->user.tx > target_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 			target_state = limit->user.tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	} else if (current_state == throttling_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		 * Unnecessary to change the throttling state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		 * If the current state is lower than the _TPC limit, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 		 * forced to switch to the throttling state defined by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		 * throttling_platform_limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		 * Because the previous state already met the thermal and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		 * user limit conditions, there is no need to check them again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		target_state = throttling_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	return acpi_processor_set_throttling(pr, target_state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  * This function reevaluates whether the T-state is valid after a CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  * is brought online or taken offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358)  * Note that it does not reevaluate the following T-state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  * properties:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  *	1. Control method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  *	2. Number of supported T-states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  *	3. _TSD domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 					bool is_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	if (is_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		/* When a CPU goes offline, its T-state throttling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		 * is invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		pr->flags.throttling = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	/* The following rechecks whether the T-state is valid for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	 * newly onlined CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	if (!pr->throttling.state_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 		/* If the number of T-states is invalid, throttling is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		 * invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		pr->flags.throttling = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	pr->flags.throttling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	/* Disable throttling (if enabled).  We'll let subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	 * policy (e.g. thermal) decide to lower performance if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	 * so chooses, but for now we'll crank up the speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	result = acpi_processor_get_throttling(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	if (pr->throttling.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		result = acpi_processor_set_throttling(pr, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		pr->flags.throttling = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * _PTC - Processor Throttling Control (and status) register location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	acpi_status status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	union acpi_object *ptc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	union acpi_object obj = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	struct acpi_processor_throttling *throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		if (status != AE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	ptc = (union acpi_object *)buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	    || (ptc->package.count != 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	 * control_register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	obj = ptc->package.elements[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	if ((obj.type != ACPI_TYPE_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	    || (obj.buffer.pointer == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		       "Invalid _PTC data (control_register)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	       sizeof(struct acpi_ptc_register));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	 * status_register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	obj = ptc->package.elements[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	if ((obj.type != ACPI_TYPE_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	    || (obj.buffer.pointer == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	       sizeof(struct acpi_ptc_register));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	throttling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	if ((throttling->control_register.bit_width +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		throttling->control_register.bit_offset) > 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	if ((throttling->status_register.bit_width +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		throttling->status_register.bit_offset) > 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485)       end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	kfree(buffer.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)  * _TSS - Throttling Supported States
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	acpi_status status = AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	struct acpi_buffer state = { 0, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	union acpi_object *tss = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		if (status != AE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	tss = buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			  tss->package.count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	pr->throttling.state_count = tss->package.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	pr->throttling.states_tss =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	    kmalloc_array(tss->package.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			  sizeof(struct acpi_processor_tx_tss),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 			  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	if (!pr->throttling.states_tss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	for (i = 0; i < pr->throttling.state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		struct acpi_processor_tx_tss *tx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 						      states_tss[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		state.length = sizeof(struct acpi_processor_tx_tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		state.pointer = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		status = acpi_extract_package(&(tss->package.elements[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 					      &format, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 			kfree(pr->throttling.states_tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		if (!tx->freqpercentage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			       "Invalid _TSS data: freq is zero\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 			result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			kfree(pr->throttling.states_tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)       end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	kfree(buffer.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) }
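/*
 * For orientation (hypothetical values): each _TSS entry matched by the
 * "NNNNN" format above carries five integers -- FreqPercentage, Power (mW),
 * TransitionLatency (us), Control and Status -- so a two-state table could
 * look like this in ASL:
 *
 *	Name (_TSS, Package ()
 *	{
 *		Package () { 100, 1000, 0, 0x00, 0x00 },	// T0: no throttling
 *		Package () {  50,  500, 0, 0x04, 0x04 },	// T1: 50% duty cycle
 *	})
 *
 * The freqpercentage check above rejects tables whose entries would imply a
 * fully stopped clock.
 */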
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * _TSD - T-State Dependencies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) static int acpi_processor_get_tsd(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	acpi_status status = AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	struct acpi_buffer state = { 0, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	union acpi_object *tsd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	struct acpi_tsd_package *pdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	struct acpi_processor_throttling *pthrottling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	pthrottling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	pthrottling->tsd_valid_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		if (status != AE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	tsd = buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (tsd->package.count != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	pdomain = &(pr->throttling.domain_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	state.length = sizeof(struct acpi_tsd_package);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	state.pointer = pdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	status = acpi_extract_package(&(tsd->package.elements[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 				      &format, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	pthrottling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	pthrottling->tsd_valid_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	pthrottling->shared_type = pdomain->coord_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	 * If the coordination type is not defined in the ACPI spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	 * the tsd_valid_flag is cleared and the coordination type is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	 * forced to DOMAIN_COORD_TYPE_SW_ALL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		pthrottling->tsd_valid_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)       end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	kfree(buffer.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) }
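/*
 * For comparison with the checks above (hypothetical example): a Rev-0 _TSD
 * package holds ACPI_TSD_REV0_ENTRIES (5) integers -- NumEntries, Revision,
 * Domain, CoordType and NumProcessors -- for instance:
 *
 *	Name (_TSD, Package ()
 *	{
 *		Package () { 5, 0, 0, 0xFC, 2 }		// 0xFC == SW_ALL
 *	})
 *
 * CoordType values 0xFC/0xFD/0xFE correspond to DOMAIN_COORD_TYPE_SW_ALL,
 * SW_ANY and HW_ALL, which is exactly the set accepted at the end of
 * acpi_processor_get_tsd().
 */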
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) /* --------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)                               Throttling Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)    -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	int state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	u32 value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	u32 duty_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	u32 duty_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	 * We don't care about error returns - we just try to mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	 * these reserved so that nobody else is confused into thinking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	 * that this region might be unused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	 * (In particular, allocating the IO range for Cardbus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	pr->throttling.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	duty_mask = pr->throttling.state_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	duty_mask <<= pr->throttling.duty_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	value = inl(pr->throttling.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	 * Compute the current throttling state when throttling is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	 * (bit 4 is on).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (value & 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		duty_value = value & duty_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		duty_value >>= pr->throttling.duty_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		if (duty_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			state = pr->throttling.state_count - duty_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	pr->throttling.state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			  "Throttling state is T%d (%d%% throttling applied)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			  state, pr->throttling.states[state].performance));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) }
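/*
 * Worked example for the duty-cycle arithmetic above (numbers are
 * illustrative): with 8 T-states (duty width 3) and duty_offset 1,
 * duty_mask = (8 - 1) << 1 = 0x0E. A P_CNT read of 0x1A has bit 4 set
 * (throttling enabled) and duty_value = (0x1A & 0x0E) >> 1 = 5, so
 * state = 8 - 5 = T3: the clock runs at a 5/8 duty cycle and 37.5%
 * throttling is reported.
 */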
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) static int acpi_throttling_rdmsr(u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	u64 msr_high, msr_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	u64 msr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		!this_cpu_has(X86_FEATURE_ACPI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			"HARDWARE addr space, NOT supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		msr_low = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		msr_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		rdmsr_safe(MSR_IA32_THERM_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			(u32 *)&msr_low, (u32 *)&msr_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		msr = (msr_high << 32) | msr_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		*value = (u64) msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) static int acpi_throttling_wrmsr(u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		!this_cpu_has(X86_FEATURE_ACPI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			"HARDWARE addr space, NOT supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		msr = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		wrmsr_safe(MSR_IA32_THERM_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			msr & 0xffffffff, msr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) static int acpi_throttling_rdmsr(u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		"HARDWARE addr space, NOT supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static int acpi_throttling_wrmsr(u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	printk(KERN_ERR PREFIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		"HARDWARE addr space, NOT supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) #endif
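/*
 * Background for the MSR path above (summarizing the Intel SDM description
 * of this MSR): MSR_IA32_THERM_CONTROL implements on-demand clock
 * modulation -- bit 4 enables it and bits [3:1] select the duty cycle --
 * so the raw 64-bit value passed through here carries the same
 * enable/duty-cycle layout that the FADT I/O path manipulates directly.
 */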
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static int acpi_read_throttling_status(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 					u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	u32 bit_width, bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	u32 ptc_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	u64 ptc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	struct acpi_processor_throttling *throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	throttling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	switch (throttling->status_register.space_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	case ACPI_ADR_SPACE_SYSTEM_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		bit_width = throttling->status_register.bit_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		bit_offset = throttling->status_register.bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		acpi_os_read_port((acpi_io_address) throttling->status_register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				  address, &ptc_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				  (u32) (bit_width + bit_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		ptc_mask = (1 << bit_width) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		ret = acpi_throttling_rdmsr(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		       (u32) (throttling->status_register.space_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) static int acpi_write_throttling_state(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	u32 bit_width, bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	u64 ptc_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	u64 ptc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	struct acpi_processor_throttling *throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	throttling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	switch (throttling->control_register.space_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	case ACPI_ADR_SPACE_SYSTEM_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		bit_width = throttling->control_register.bit_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		bit_offset = throttling->control_register.bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		ptc_mask = (1 << bit_width) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		ptc_value = value & ptc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		acpi_os_write_port((acpi_io_address) throttling->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 					control_register.address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 					(u32) (ptc_value << bit_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 					(u32) (bit_width + bit_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		ret = acpi_throttling_wrmsr(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		       (u32) (throttling->control_register.space_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static int acpi_get_throttling_state(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	for (i = 0; i < pr->throttling.state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		struct acpi_processor_tx_tss *tx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 						      states_tss[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		if (tx->control == value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static int acpi_get_throttling_value(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			int state, u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (state >= 0 && state < pr->throttling.state_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		struct acpi_processor_tx_tss *tx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 						      states_tss[state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		*value = tx->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	int state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	pr->throttling.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	ret = acpi_read_throttling_status(pr, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		state = acpi_get_throttling_state(pr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		if (state == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				"Invalid throttling state, reset\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			ret = __acpi_processor_set_throttling(pr, state, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 							      true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		pr->throttling.state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static long __acpi_processor_get_throttling(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	struct acpi_processor *pr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return pr->throttling.acpi_processor_get_throttling(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) static int acpi_processor_get_throttling(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	 * This is either called from the CPU hotplug callback of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	 * processor_driver or via the ACPI probe function. In the latter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * case the CPU is not guaranteed to be online. Both call sites are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * protected against CPU hotplug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 */
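	/*
	 * The status read may be a per-CPU MSR access (the
	 * ACPI_ADR_SPACE_FIXED_HARDWARE case), which has to run on the CPU
	 * being queried; call_on_cpu() below arranges that.
	 */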
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (!cpu_online(pr->id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	int i, step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (!pr->throttling.address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	} else if (!pr->throttling.duty_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* TBD: Support duty_cycle values that span bit 4. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * Compute state values. Note that throttling displays a linear power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * performance relationship (at 50% performance the CPU will consume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 */
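	/*
	 * Worked example (assuming a FADT duty_width of 3): state_count is
	 * 1 << 3 = 8 and step is 1000 / 8 = 125, so T0 maps to 1000
	 * (100.0%), T1 to 875 (87.5%), ... and T7 to 125 (12.5%).
	 */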
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	step = (1000 / pr->throttling.state_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	for (i = 0; i < pr->throttling.state_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		pr->throttling.states[i].performance = 1000 - step * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		pr->throttling.states[i].power = 1000 - step * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 					      int state, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	u32 value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	u32 duty_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	u32 duty_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (!force && (state == pr->throttling.state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if (state < pr->throttling_platform_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 * Calculate the duty_value and duty_mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		duty_value = pr->throttling.state_count - state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		duty_value <<= pr->throttling.duty_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		/* Used to clear all duty_value bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		duty_mask = pr->throttling.state_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		duty_mask <<= acpi_gbl_FADT.duty_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		duty_mask = ~duty_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	}
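	/*
	 * Worked example with assumed values (state_count = 8,
	 * duty_offset = 1): for state T2, duty_value = (8 - 2) << 1 = 0x0C
	 * and duty_mask = ~((8 - 1) << 1) = ~0x0E, so the writes below
	 * clear bits 1-3 and then program a 6/8 (75%) duty cycle, matching
	 * the 750 (75.0%) performance value computed in
	 * acpi_processor_get_fadt_info().
	 */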
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * Disable throttling by writing a 0 to bit 4.  Note that it must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * be turned off before the duty_value can be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 */
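	/*
	 * Bit 4 is the THT_EN (throttle enable) bit of the P_CNT processor
	 * control register defined by the ACPI specification; the duty
	 * cycle field sits below it (acpi_processor_get_fadt_info()
	 * rejects layouts where it would span bit 4).
	 */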
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	value = inl(pr->throttling.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (value & 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		value &= 0xFFFFFFEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		outl(value, pr->throttling.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * Write the new duty_value and then enable throttling.  Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * that a state value of 0 leaves throttling disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		value &= duty_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		value |= duty_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		outl(value, pr->throttling.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		value |= 0x00000010;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		outl(value, pr->throttling.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	pr->throttling.state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			  "Throttling state set to T%d (%d%%)\n", state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			  (pr->throttling.states[state].performance ? pr->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			   throttling.states[state].performance / 10 : 0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 					     int state, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (!force && (state == pr->throttling.state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (state < pr->throttling_platform_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	ret = acpi_get_throttling_value(pr, state, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		acpi_write_throttling_state(pr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		pr->throttling.state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static long acpi_processor_throttling_fn(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct acpi_processor_throttling_arg *arg = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	struct acpi_processor *pr = arg->pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	return pr->throttling.acpi_processor_set_throttling(pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			arg->target_state, arg->force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static int __acpi_processor_set_throttling(struct acpi_processor *pr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 					   int state, bool force, bool direct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct acpi_processor *match_pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	struct acpi_processor_throttling *p_throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct acpi_processor_throttling_arg arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	struct throttling_tstate t_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (!pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (!pr->flags.throttling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	if (cpu_is_offline(pr->id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		 * The CPU referred to by pr->id is offline, so there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		 * need to change the throttling state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	t_state.target_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	p_throttling = &(pr->throttling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 * The throttling notifier is called for every affected CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 * so that they all end up with one proper T-state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 * The notifier event is THROTTLING_PRECHANGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		t_state.cpu = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 							&t_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	 * acpi_processor_set_throttling() is called to switch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 * T-state. If the coordination type is SW_ALL or HW_ALL, it must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 * be called for every affected CPU. Otherwise it only needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 * called for the CPU referred to by pr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 */
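	/*
	 * Per the ACPI spec, _TSD reports the coordination type as 0xFC
	 * (SW_ALL), 0xFD (SW_ANY) or 0xFE (HW_ALL). With SW_ANY a single
	 * write on behalf of the whole domain is sufficient, so only
	 * pr->id is targeted in that branch.
	 */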
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		arg.pr = pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		arg.target_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		arg.force = force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				  direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		 * When the T-state coordination is SW_ALL or HW_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		 * the T-state must be set on every affected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		 * CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		for_each_cpu_and(i, cpu_online_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		    p_throttling->shared_cpu_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			match_pr = per_cpu(processors, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			 * If the pointer is invalid, print a debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			 * message and skip this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			if (!match_pr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 					"Invalid Pointer for CPU %d\n", i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			 * If throttling control is unsupported on CPU i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			 * print a debug message and skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			if (!match_pr->flags.throttling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 					"Throttling Control is unsupported "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 					"on CPU %d\n", i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			arg.pr = match_pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			arg.target_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			arg.force = force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 					  &arg, direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	 * After set_throttling has been called, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	 * throttling notifier is called for every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 * affected CPU to update the T-states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	 * The notifier event is THROTTLING_POSTCHANGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		t_state.cpu = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 							&t_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				  bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	return __acpi_processor_set_throttling(pr, state, force, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int acpi_processor_get_throttling_info(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct acpi_processor_throttling *pthrottling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			  pr->throttling.address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			  pr->throttling.duty_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			  pr->throttling.duty_width));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	 * Evaluate _PTC, _TSS and _TPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 * They must all be present or none of them can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (acpi_processor_get_throttling_control(pr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		acpi_processor_get_throttling_states(pr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		acpi_processor_get_platform_limit(pr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		pr->throttling.acpi_processor_get_throttling =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		    &acpi_processor_get_throttling_fadt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		pr->throttling.acpi_processor_set_throttling =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		    &acpi_processor_set_throttling_fadt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (acpi_processor_get_fadt_info(pr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		pr->throttling.acpi_processor_get_throttling =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		    &acpi_processor_get_throttling_ptc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		pr->throttling.acpi_processor_set_throttling =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		    &acpi_processor_set_throttling_ptc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
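	/*
	 * At this point one of two backends has been selected: the legacy
	 * FADT duty-cycle interface when any of _PTC/_TSS/_TPC is missing,
	 * or the _TSS/_PTC register interface when all three are present.
	 */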
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	 * If the _TSD package for a CPU cannot be parsed successfully,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	 * that CPU has no throttling coordination with other CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (acpi_processor_get_tsd(pr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		pthrottling = &pr->throttling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		pthrottling->tsd_valid_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 * This shouldn't be an issue as few (if any) mobile systems ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	 * used this part.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (errata.piix4.throttle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 				  "Throttling not supported on PIIX4 A- or B-step\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			  pr->throttling.state_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	pr->flags.throttling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * thermal) decide to lower performance if it so chooses, but for now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * we'll crank up the speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	result = acpi_processor_get_throttling(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (pr->throttling.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 				  "Disabling throttling (was T%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				  pr->throttling.state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		result = acpi_processor_set_throttling(pr, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		pr->flags.throttling = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)