Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2005 Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * 	- Added _PDC for SMP C-states on Intel CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <acpi/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/mwait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/special_insns.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Initialize bm_flags based on the CPU cache properties
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * On SMP it depends on cache configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * - When cache is not shared among all CPUs, we flush cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *   before entering C3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * - When cache is shared among all CPUs, we use bm_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *   mechanism as in UP case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * This routine is called only after all the CPUs are online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 					unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	struct cpuinfo_x86 *c = &cpu_data(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	flags->bm_check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	if (num_online_cpus() == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 		flags->bm_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	else if (c->x86_vendor == X86_VENDOR_INTEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		 * Today all MP CPUs that support C3 share cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 		 * And caches should not be flushed by software while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		 * entering C3 type state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		flags->bm_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	 * On all recent Intel platforms, ARB_DISABLE is a nop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	 * So, set bm_control to zero to indicate that ARB_DISABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	 * is not required while entering C3 type state on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	 * P4, Core and beyond CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	if (c->x86_vendor == X86_VENDOR_INTEL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 			flags->bm_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	 * For all recent Centaur CPUs, the ucode will make sure that each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	 * core can keep cache coherence with each other while entering C3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	 * type state. So, set bm_check to 1 to indicate that the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	 * doesn't need to execute a cache flush operation (WBINVD) when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	 * entering C3 type state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		    c->x86_stepping >= 0x0e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 			flags->bm_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		 * All Zhaoxin CPUs that support C3 share cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		 * And caches should not be flushed by software while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		 * entering C3 type state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		flags->bm_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		 * So, set bm_control to zero to indicate that ARB_DISABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		 * is not required while entering C3 type state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		flags->bm_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
/* The code below handles cstate entry with monitor-mwait pair on Intel */

/*
 * Per-CPU cache of the MONITOR/MWAIT arguments for each ACPI C-state.
 * Filled in by acpi_processor_ffh_cstate_probe() and consumed on the
 * idle path by acpi_processor_ffh_cstate_enter().
 */
struct cstate_entry {
	struct {
		unsigned int eax;	/* MWAIT hint (the _CST FFH address) */
		unsigned int ecx;	/* MWAIT extensions (INTERRUPT_BREAK) */
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

/* One-shot flags so the "Monitor-Mwait will be used" debug message is
 * printed only once per native C-state type. */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

/* GAS bit_offset value checked in the probe; presumably marks an FFH
 * C-state entered via MWAIT (a native state beyond plain HLT) — see
 * the Intel processor vendor-specific ACPI interface spec. */
#define NATIVE_CSTATE_BEYOND_HALT	(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	struct acpi_processor_cx *cx = _cx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	long retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	unsigned int eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	unsigned int edx_part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	unsigned int num_cstate_subtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	/* Check whether this particular cx_type (in CST) is supported or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 			MWAIT_CSTATE_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	/* If the HW does not support any sub-states in this C-state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	if (num_cstate_subtype == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 				cx->address, edx_part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		retval = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		retval = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	if (!mwait_supported[cstate_type]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		mwait_supported[cstate_type] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 			"Monitor-Mwait will be used to enter C-%d state\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 			cx->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	snprintf(cx->desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 			cx->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) int acpi_processor_ffh_cstate_probe(unsigned int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	struct cstate_entry *percpu_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	struct cpuinfo_x86 *c = &cpu_data(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	long retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	percpu_entry->states[cx->index].eax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	percpu_entry->states[cx->index].ecx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	/* Make sure we are running on right CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 			     false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	if (retval == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		/* Use the hint in CST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		percpu_entry->states[cx->index].eax = cx->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	 * then we should skip checking BM_STS for this C-state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		cx->bm_sts_skip = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	struct cstate_entry *percpu_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	                      percpu_entry->states[cx->index].ecx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static int __init ffh_cstate_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	struct cpuinfo_x86 *c = &boot_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	if (c->x86_vendor != X86_VENDOR_INTEL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	    c->x86_vendor != X86_VENDOR_AMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
/* Tear down the per-CPU MWAIT argument cache allocated at init time. */
static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;	/* probe treats NULL as "FFH unavailable" */
}

/* Register early (arch_initcall) so the cache exists before the ACPI
 * processor driver starts probing C-states. */
arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);