^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * POWERNV cpufreq driver for the IBM POWER processors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * (C) Copyright IBM 2014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define pr_fmt(fmt) "powernv-cpufreq: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sysfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/hashtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <trace/events/power.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/cputhreads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/opal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
/* Hashtable order for pstate_revmap: 2^8 = 256 buckets, one per u8 pstate. */
#define POWERNV_MAX_PSTATES_ORDER 8
/* Upper bound on the number of distinct pstates (pstate ids fit in a u8). */
#define POWERNV_MAX_PSTATES	(1UL << (POWERNV_MAX_PSTATES_ORDER))
/* PMSR bit: firmware has clamped the core to the safe frequency. */
#define PMSR_PSAFE_ENABLE	(1UL << 30)
/* PMSR bit: SPR-based frequency control is disabled by the OCC. */
#define PMSR_SPR_EM_DISABLE	(1UL << 31)
/* Bit offsets of the pstate fields inside the 64-bit PMSR value. */
#define MAX_PSTATE_SHIFT	32
#define LPSTATE_SHIFT		48
#define GPSTATE_SHIFT		56
/* Maximum number of chips this driver tracks in the chips[] array. */
#define MAX_NR_CHIPS		32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
/* Total time (ms) over which an idle system ramps the global pstate down. */
#define MAX_RAMP_DOWN_TIME				5120
/*
 * On an idle system we want the global pstate to ramp-down from max value to
 * min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
 * then ramp-down rapidly later on.
 *
 * This gives a percentage rampdown for time elapsed in milliseconds.
 * ramp_down_percentage = ((ms * ms) >> 18)
 *			~= 3.8 * (sec * sec)
 *
 * At 0 ms	ramp_down_percent = 0
 * At 5120 ms	ramp_down_percent = 100
 *
 * The argument is fully parenthesized so that expression arguments
 * (e.g. "a + b") evaluate correctly despite the '*' precedence.
 */
#define ramp_down_percent(time)		(((time) * (time)) >> 18)

/* Interval (ms) after which the timer is queued to bring down global pstate */
#define GPSTATE_TIMER_INTERVAL				2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/**
 * struct global_pstate_info -	Per policy data structure to maintain history of
 *				global pstates
 * @highest_lpstate_idx:	The local pstate index from which we are
 *				ramping down
 * @elapsed_time:		Time in ms spent in ramping down from
 *				highest_lpstate_idx
 * @last_sampled_time:		Time from boot in ms when global pstates were
 *				last set
 * @last_lpstate_idx:		Last set value of local pstate and global
 * @last_gpstate_idx:		pstate in terms of cpufreq table index
 * @timer:			Is used for ramping down if cpu goes idle for
 *				a long time with global pstate held high
 * @gpstate_lock:		A spinlock to maintain synchronization between
 *				routines called by the timer handler and
 *				governor's target_index calls
 * @policy:			Associated CPUFreq policy
 */
struct global_pstate_info {
	int highest_lpstate_idx;
	unsigned int elapsed_time;
	unsigned int last_sampled_time;
	int last_lpstate_idx;
	int last_gpstate_idx;
	spinlock_t gpstate_lock;
	struct timer_list timer;
	struct cpufreq_policy *policy;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
/* Frequency table exposed to cpufreq; +1 slot for the CPUFREQ_TABLE_END marker. */
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];

/* Reverse map: pstate id -> index into powernv_freqs[]. */
static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
/**
 * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
 *				  indexed by a function of pstate id.
 *
 * @pstate_id: pstate id for this entry.
 *
 * @cpufreq_table_idx: Index into the powernv_freqs
 *		       cpufreq_frequency_table for frequency
 *		       corresponding to pstate_id.
 *
 * @hentry: hlist_node that hooks this entry into the pstate_revmap
 *	    hashtable
 */
struct pstate_idx_revmap_data {
	u8 pstate_id;
	unsigned int cpufreq_table_idx;
	struct hlist_node hentry;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
/* Driver-wide state flags: reboot in progress, frequency throttled, OCC reset. */
static bool rebooting, throttled, occ_reset;

/* Human-readable throttle cause strings, indexed by enum throttle_reason_type. */
static const char * const throttle_reason[] = {
	"No throttling",
	"Power Cap",
	"Processor Over Temperature",
	"Power Supply Failure",
	"Over Current",
	"OCC Reset"
};

/* OCC throttle cause codes; must stay in sync with throttle_reason[] above. */
enum throttle_reason_type {
	NO_THROTTLE = 0,
	POWERCAP,
	CPU_OVERTEMP,
	POWER_SUPPLY_FAILURE,
	OVERCURRENT,
	OCC_RESET_THROTTLE,
	OCC_MAX_REASON
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
/* Per-chip throttling state; chips[] is sized/allocated elsewhere (nr_chips). */
static struct chip {
	unsigned int id;		/* hardware chip id */
	bool throttled;			/* currently throttled below nominal */
	bool restore;			/* pstate must be restored after unthrottle */
	u8 throttle_reason;		/* last reason, indexes throttle_reason[] */
	cpumask_t mask;			/* cpus belonging to this chip */
	struct work_struct throttle;	/* deferred throttle handling work */
	int throttle_turbo;		/* count of throttles in turbo range */
	int throttle_sub_turbo;		/* count of throttles below turbo */
	int reason[OCC_MAX_REASON];	/* per-cause occurrence counters */
} *chips;

static int nr_chips;
/* Fast per-cpu pointer back to the owning chips[] entry. */
static DEFINE_PER_CPU(struct chip *, chip_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
/*
 * Note:
 * The set of pstates consists of contiguous integers.
 * powernv_pstate_info stores the index of the frequency table for
 * max, min and nominal frequencies. It also stores number of
 * available frequencies.
 *
 * powernv_pstate_info.nominal indicates the index to the highest
 * non-turbo frequency.
 */
static struct powernv_pstate_info {
	unsigned int min;	/* powernv_freqs index of the min frequency */
	unsigned int max;	/* powernv_freqs index of the max frequency */
	unsigned int nominal;	/* powernv_freqs index of highest non-turbo freq */
	unsigned int nr_pstates;	/* number of valid entries in powernv_freqs */
	bool wof_enabled;	/* Workload Optimized Frequency available */
} powernv_pstate_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
/* Extract the 8-bit pstate field located at @shift inside a PMSR-style value. */
static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
{
	return ((pmsr_val >> shift) & 0xFF);
}

/* Named accessors for the three pstate fields of the PMSR register value. */
#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
#define extract_max_pstate(x)  extract_pstate(x, MAX_PSTATE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) /* Use following functions for conversions between pstate_id and index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * idx_to_pstate : Returns the pstate id corresponding to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * frequency in the cpufreq frequency table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * powernv_freqs indexed by @i.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * If @i is out of bound, this will return the pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * corresponding to the nominal frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static inline u8 idx_to_pstate(unsigned int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) return powernv_freqs[powernv_pstate_info.nominal].driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) return powernv_freqs[i].driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * pstate_to_idx : Returns the index in the cpufreq frequencytable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * powernv_freqs for the frequency whose corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) * pstate id is @pstate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * If no frequency corresponding to @pstate is found,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * this will return the index of the nominal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * frequency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static unsigned int pstate_to_idx(u8 pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) unsigned int key = pstate % POWERNV_MAX_PSTATES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct pstate_idx_revmap_data *revmap_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (revmap_data->pstate_id == pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return revmap_data->cpufreq_table_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return powernv_pstate_info.nominal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static inline void reset_gpstates(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct global_pstate_info *gpstates = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) gpstates->highest_lpstate_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) gpstates->elapsed_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) gpstates->last_sampled_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) gpstates->last_lpstate_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) gpstates->last_gpstate_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * Initialize the freq table based on data obtained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * from the firmware passed via device-tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
/*
 * init_powernv_pstates - populate powernv_freqs[], pstate_revmap and
 * powernv_pstate_info from the "/ibm,opal/power-mgt" device-tree node.
 *
 * Returns 0 on success, -ENODEV when a mandatory property is missing,
 * or -ENOMEM on allocation failure.
 */
static int init_powernv_pstates(void)
{
	struct device_node *power_mgt;
	int i, nr_pstates = 0;
	const __be32 *pstate_ids, *pstate_freqs;
	u32 len_ids, len_freqs;
	u32 pstate_min, pstate_max, pstate_nominal;
	u32 pstate_turbo, pstate_ultra_turbo;
	int rc = -ENODEV;

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("power-mgt node not found\n");
		return -ENODEV;
	}

	/* min, max and nominal pstate ids are all mandatory. */
	if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
		pr_warn("ibm,pstate-min node not found\n");
		goto out;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
		pr_warn("ibm,pstate-max node not found\n");
		goto out;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
				 &pstate_nominal)) {
		pr_warn("ibm,pstate-nominal not found\n");
		goto out;
	}

	/*
	 * The turbo/ultra-turbo properties are optional; their absence only
	 * disables WOF (Workload Optimized Frequency), it is not an error.
	 */
	if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
				 &pstate_ultra_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
				 &pstate_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	/* WOF only matters when there is headroom above plain turbo. */
	if (pstate_turbo == pstate_ultra_turbo)
		powernv_pstate_info.wof_enabled = false;
	else
		powernv_pstate_info.wof_enabled = true;

next:
	pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
		pstate_nominal, pstate_max);
	pr_info("Workload Optimized Frequency is %s in the platform\n",
		(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");

	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
	if (!pstate_ids) {
		pr_warn("ibm,pstate-ids not found\n");
		goto out;
	}

	pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
				      &len_freqs);
	if (!pstate_freqs) {
		pr_warn("ibm,pstate-frequencies-mhz not found\n");
		goto out;
	}

	/* Mismatched lengths are tolerated: only the common prefix is used. */
	if (len_ids != len_freqs) {
		pr_warn("Entries in ibm,pstate-ids and "
			"ibm,pstate-frequencies-mhz does not match\n");
	}

	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
	if (!nr_pstates) {
		pr_warn("No PStates found\n");
		goto out;
	}

	powernv_pstate_info.nr_pstates = nr_pstates;
	pr_debug("NR PStates %d\n", nr_pstates);

	for (i = 0; i < nr_pstates; i++) {
		u32 id = be32_to_cpu(pstate_ids[i]);
		u32 freq = be32_to_cpu(pstate_freqs[i]);
		struct pstate_idx_revmap_data *revmap_data;
		unsigned int key;

		pr_debug("PState id %d freq %d MHz\n", id, freq);
		powernv_freqs[i].frequency = freq * 1000; /* kHz */
		/* Only the low byte of the id is a valid pstate (fits a u8). */
		powernv_freqs[i].driver_data = id & 0xFF;

		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
		if (!revmap_data) {
			/*
			 * NOTE(review): revmap_data entries already hashed in
			 * earlier iterations are not freed on this path --
			 * looks like a leak on a failed module init; confirm
			 * whether that is acceptable here.
			 */
			rc = -ENOMEM;
			goto out;
		}

		/* Record the reverse mapping: pstate id -> table index i. */
		revmap_data->pstate_id = id & 0xFF;
		revmap_data->cpufreq_table_idx = i;
		key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
		hash_add(pstate_revmap, &revmap_data->hentry, key);

		if (id == pstate_max)
			powernv_pstate_info.max = i;
		if (id == pstate_nominal)
			powernv_pstate_info.nominal = i;
		if (id == pstate_min)
			powernv_pstate_info.min = i;

		/*
		 * When WOF is enabled, every entry faster than (and down to)
		 * the max pstate is a boost frequency; flag them once the
		 * turbo entry is reached. Assumes the table is sorted from
		 * fastest to slowest, as firmware provides it.
		 */
		if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
			int j;

			for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
				powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
		}
	}

	/* End of list marker entry */
	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;

	of_node_put(power_mgt);
	return 0;
out:
	of_node_put(power_mgt);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* Returns the CPU frequency corresponding to the pstate_id. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static unsigned int pstate_id_to_freq(u8 pstate_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) i = pstate_to_idx(pstate_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (i >= powernv_pstate_info.nr_pstates || i < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) i = powernv_pstate_info.nominal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return powernv_freqs[i].frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return sprintf(buf, "%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) powernv_freqs[powernv_pstate_info.nominal].frequency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
/* sysfs attribute exposing the firmware-reported nominal frequency. */
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
	__ATTR_RO(cpuinfo_nominal_freq);

/* Position of scaling_boost_freqs in powernv_cpu_freq_attr[] below. */
#define SCALING_BOOST_FREQS_ATTR_INDEX		2

/* per-policy sysfs attributes; NULL-terminated for the cpufreq core. */
static struct freq_attr *powernv_cpu_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_cpuinfo_nominal_freq,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
/*
 * throttle_attr(name, member) - generate a read-only sysfs show routine that
 * prints the @member field of the chip owning policy->cpu, plus the matching
 * freq_attr object named throttle_attr_<name>.
 */
#define throttle_attr(name, member)				\
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
{								\
	struct chip *chip = per_cpu(chip_info, policy->cpu);	\
								\
	return sprintf(buf, "%u\n", chip->member);		\
}								\
								\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)	\

/* One counter attribute per OCC throttle cause, plus turbo-range stats. */
throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);

static struct attribute *throttle_attrs[] = {
	&throttle_attr_unthrottle.attr,
	&throttle_attr_powercap.attr,
	&throttle_attr_overtemp.attr,
	&throttle_attr_supply_fault.attr,
	&throttle_attr_overcurrent.attr,
	&throttle_attr_occ_reset.attr,
	&throttle_attr_turbo_stat.attr,
	&throttle_attr_sub_turbo_stat.attr,
	NULL,
};

/* Grouped under <policy>/throttle_stats in sysfs. */
static const struct attribute_group throttle_attr_grp = {
	.name	= "throttle_stats",
	.attrs	= throttle_attrs,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /* Helper routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /* Access helpers to power mgt SPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) static inline unsigned long get_pmspr(unsigned long sprn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) switch (sprn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) case SPRN_PMCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) return mfspr(SPRN_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) case SPRN_PMICR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return mfspr(SPRN_PMICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) case SPRN_PMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return mfspr(SPRN_PMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) static inline void set_pmspr(unsigned long sprn, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) switch (sprn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) case SPRN_PMCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) mtspr(SPRN_PMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) case SPRN_PMICR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) mtspr(SPRN_PMICR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * Use objects of this type to query/update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * pstates on a remote CPU via smp_call_function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct powernv_smp_call_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) unsigned int freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) u8 pstate_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) u8 gpstate_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * powernv_read_cpu_freq: Reads the current frequency on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * Called via smp_call_function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * Note: The caller of the smp_call_function should pass an argument of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * the type 'struct powernv_smp_call_data *' along with this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * The current frequency on this CPU will be returned via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * ((struct powernv_smp_call_data *)arg)->freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void powernv_read_cpu_freq(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) unsigned long pmspr_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct powernv_smp_call_data *freq_data = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) pmspr_val = get_pmspr(SPRN_PMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) freq_data->pstate_id = extract_local_pstate(pmspr_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) freq_data->freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * powernv_cpufreq_get: Returns the CPU frequency as reported by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * firmware for CPU 'cpu'. This value is reported through the sysfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * file cpuinfo_cur_freq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static unsigned int powernv_cpufreq_get(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct powernv_smp_call_data freq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) &freq_data, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return freq_data.freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * set_pstate: Sets the pstate on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * This is called via an smp_call_function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * The caller must ensure that freq_data is of the type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * on this CPU should be present in freq_data->pstate_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) static void set_pstate(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) struct powernv_smp_call_data *freq_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) unsigned long pstate_ul = freq_data->pstate_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) unsigned long gpstate_ul = freq_data->gpstate_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) val = get_pmspr(SPRN_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) val = val & 0x0000FFFFFFFFFFFFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) pstate_ul = pstate_ul & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) gpstate_ul = gpstate_ul & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* Set both global(bits 56..63) and local(bits 48..55) PStates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) val = val | (gpstate_ul << 56) | (pstate_ul << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) pr_debug("Setting cpu %d pmcr to %016lX\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) raw_smp_processor_id(), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) set_pmspr(SPRN_PMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * get_nominal_index: Returns the index corresponding to the nominal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * pstate in the cpufreq table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static inline unsigned int get_nominal_index(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) return powernv_pstate_info.nominal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) static void powernv_cpufreq_throttle_check(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) unsigned long pmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) u8 pmsr_pmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) unsigned int pmsr_pmax_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) pmsr = get_pmspr(SPRN_PMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) chip = this_cpu_read(chip_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /* Check for Pmax Capping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) pmsr_pmax = extract_max_pstate(pmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) if (pmsr_pmax_idx != powernv_pstate_info.max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (chip->throttled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) chip->throttled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) cpu, chip->id, pmsr_pmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) idx_to_pstate(powernv_pstate_info.nominal));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) chip->throttle_sub_turbo++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) chip->throttle_turbo++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) trace_powernv_throttle(chip->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) throttle_reason[chip->throttle_reason],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) pmsr_pmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) } else if (chip->throttled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) chip->throttled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) trace_powernv_throttle(chip->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) throttle_reason[chip->throttle_reason],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) pmsr_pmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /* Check if Psafe_mode_active is set in PMSR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (pmsr & PMSR_PSAFE_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) throttled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) pr_info("Pstate set to safe frequency\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /* Check if SPR_EM_DISABLE is set in PMSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (pmsr & PMSR_SPR_EM_DISABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) throttled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) pr_info("Frequency Control disabled from OS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (throttled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) pr_info("PMSR = %16lx\n", pmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) pr_warn("CPU Frequency could be throttled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * calc_global_pstate - Calculate global pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * @elapsed_time: Elapsed time in milliseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * @local_pstate_idx: New local pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * @highest_lpstate_idx: pstate from which its ramping down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * Finds the appropriate global pstate based on the pstate from which its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * ramping down and the time elapsed in ramping down. It follows a quadratic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * equation which ensures that it reaches ramping down to pmin in 5sec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static inline int calc_global_pstate(unsigned int elapsed_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) int highest_lpstate_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) int local_pstate_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) int index_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * Using ramp_down_percent we get the percentage of rampdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * that we are expecting to be dropping. Difference between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * highest_lpstate_idx and powernv_pstate_info.min will give a absolute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * number of how many pstates we will drop eventually by the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * 5 seconds, then just scale it get the number pstates to be dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) index_diff = ((int)ramp_down_percent(elapsed_time) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) (powernv_pstate_info.min - highest_lpstate_idx)) / 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /* Ensure that global pstate is >= to local pstate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (highest_lpstate_idx + index_diff >= local_pstate_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return local_pstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return highest_lpstate_idx + index_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unsigned int timer_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * Setting up timer to fire after GPSTATE_TIMER_INTERVAL ms, But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * if it exceeds MAX_RAMP_DOWN_TIME ms for ramp down time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Set timer such that it fires exactly at MAX_RAMP_DOWN_TIME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * seconds of ramp down time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) > MAX_RAMP_DOWN_TIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) timer_interval = GPSTATE_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * gpstate_timer_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * @t: Timer context used to fetch global pstate info struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * This handler brings down the global pstate closer to the local pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * according quadratic equation. Queues a new timer if it is still not equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * to local pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static void gpstate_timer_handler(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct cpufreq_policy *policy = gpstates->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) int gpstate_idx, lpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) unsigned int time_diff = jiffies_to_msecs(jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) - gpstates->last_sampled_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct powernv_smp_call_data freq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!spin_trylock(&gpstates->gpstate_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * If the timer has migrated to the different cpu then bring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * it back to one of the policy->cpus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) spin_unlock(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * If PMCR was last updated was using fast_swtich then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * We may have wrong in gpstate->last_lpstate_idx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * value. Hence, read from PMCR to get correct data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) val = get_pmspr(SPRN_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) freq_data.gpstate_id = extract_global_pstate(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) freq_data.pstate_id = extract_local_pstate(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (freq_data.gpstate_id == freq_data.pstate_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) reset_gpstates(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) spin_unlock(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) gpstates->last_sampled_time += time_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) gpstates->elapsed_time += time_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) gpstate_idx = pstate_to_idx(freq_data.pstate_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) lpstate_idx = gpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) reset_gpstates(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) gpstates->highest_lpstate_idx = gpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) lpstate_idx = pstate_to_idx(freq_data.pstate_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) gpstates->highest_lpstate_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) lpstate_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) gpstates->last_gpstate_idx = gpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) gpstates->last_lpstate_idx = lpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * If local pstate is equal to global pstate, rampdown is over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * So timer is not required to be queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (gpstate_idx != gpstates->last_lpstate_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) queue_gpstate_timer(gpstates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) set_pstate(&freq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) spin_unlock(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * powernv_cpufreq_target_index: Sets the frequency corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * the cpufreq table entry indexed by new_index on the cpus in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * mask policy->cpus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) unsigned int new_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct powernv_smp_call_data freq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned int cur_msec, gpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct global_pstate_info *gpstates = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (unlikely(rebooting) && new_index != get_nominal_index())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (!throttled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* we don't want to be preempted while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * checking if the CPU frequency has been throttled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) powernv_cpufreq_throttle_check(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) cur_msec = jiffies_to_msecs(get_jiffies_64());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) freq_data.pstate_id = idx_to_pstate(new_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!gpstates) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) freq_data.gpstate_id = freq_data.pstate_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) goto no_gpstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) spin_lock(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!gpstates->last_sampled_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) gpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) gpstates->highest_lpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) goto gpstates_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (gpstates->last_gpstate_idx < new_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) gpstates->elapsed_time += cur_msec -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) gpstates->last_sampled_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * If its has been ramping down for more than MAX_RAMP_DOWN_TIME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * we should be resetting all global pstate related data. Set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * equal to local pstate to start fresh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) reset_gpstates(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) gpstates->highest_lpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) gpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Elaspsed_time is less than 5 seconds, continue to rampdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) gpstates->highest_lpstate_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) new_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) reset_gpstates(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) gpstates->highest_lpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) gpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * If local pstate is equal to global pstate, rampdown is over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * So timer is not required to be queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (gpstate_idx != new_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) queue_gpstate_timer(gpstates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) del_timer_sync(&gpstates->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) gpstates_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) gpstates->last_sampled_time = cur_msec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) gpstates->last_gpstate_idx = gpstate_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) gpstates->last_lpstate_idx = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) spin_unlock(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) no_gpstate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * Use smp_call_function to send IPI and execute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * mtspr on target CPU. We could do that without IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * if current CPU is within policy->cpus (core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int base, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct kernfs_node *kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct global_pstate_info *gpstates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) base = cpu_first_thread_sibling(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) for (i = 0; i < threads_per_core; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cpumask_set_cpu(base + i, policy->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!kn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pr_info("Failed to create throttle stats directory for cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) kernfs_put(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) policy->freq_table = powernv_freqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) policy->fast_switch_possible = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (pvr_version_is(PVR_POWER9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Initialise Gpstate ramp-down timer only on POWER8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (!gpstates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) policy->driver_data = gpstates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* initialize timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) gpstates->policy = policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) timer_setup(&gpstates->timer, gpstate_timer_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) TIMER_PINNED | TIMER_DEFERRABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) gpstates->timer.expires = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) spin_lock_init(&gpstates->gpstate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* timer is deleted in cpufreq_cpu_stop() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) kfree(policy->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) unsigned long action, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct cpufreq_policy *cpu_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) rebooting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cpu_policy = cpufreq_cpu_get(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!cpu_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) cpufreq_cpu_put(cpu_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static struct notifier_block powernv_cpufreq_reboot_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) .notifier_call = powernv_cpufreq_reboot_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static void powernv_cpufreq_work_fn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct chip *chip = container_of(work, struct chip, throttle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct cpufreq_policy *policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) cpumask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) cpumask_and(&mask, &chip->mask, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) smp_call_function_any(&mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) powernv_cpufreq_throttle_check, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (!chip->restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) chip->restore = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) for_each_cpu(cpu, &mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) policy = cpufreq_cpu_get(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) index = cpufreq_table_find_index_c(policy, policy->cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) powernv_cpufreq_target_index(policy, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cpumask_andnot(&mask, &mask, policy->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) cpufreq_cpu_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned long msg_type, void *_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct opal_msg *msg = _msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct opal_occ_msg omsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (msg_type != OPAL_MSG_OCC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) omsg.type = be64_to_cpu(msg->params[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) switch (omsg.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) case OCC_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) occ_reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * powernv_cpufreq_throttle_check() is called in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * target() callback which can detect the throttle state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * for governors like ondemand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * But static governors will not call target() often thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * report throttling here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (!throttled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) throttled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) pr_warn("CPU frequency is throttled for duration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) case OCC_LOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) case OCC_THROTTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) omsg.chip = be64_to_cpu(msg->params[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) omsg.throttle_status = be64_to_cpu(msg->params[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (occ_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) occ_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) throttled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pr_info("OCC Active, CPU frequency is no longer throttled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) for (i = 0; i < nr_chips; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) chips[i].restore = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) schedule_work(&chips[i].throttle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (i = 0; i < nr_chips; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (chips[i].id == omsg.chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (omsg.throttle_status >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) chips[i].throttle_reason = omsg.throttle_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) chips[i].reason[omsg.throttle_status]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!omsg.throttle_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) chips[i].restore = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) schedule_work(&chips[i].throttle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static struct notifier_block powernv_cpufreq_opal_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .notifier_call = powernv_cpufreq_occ_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) .next = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) .priority = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct powernv_smp_call_data freq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct global_pstate_info *gpstates = policy->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (gpstates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) del_timer_sync(&gpstates->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) unsigned int target_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct powernv_smp_call_data freq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) index = cpufreq_table_find_index_dl(policy, target_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) freq_data.pstate_id = powernv_freqs[index].driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) freq_data.gpstate_id = powernv_freqs[index].driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) set_pstate(&freq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return powernv_freqs[index].frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static struct cpufreq_driver powernv_cpufreq_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .name = "powernv-cpufreq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .flags = CPUFREQ_CONST_LOOPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .init = powernv_cpufreq_cpu_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .exit = powernv_cpufreq_cpu_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .verify = cpufreq_generic_frequency_table_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) .target_index = powernv_cpufreq_target_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) .fast_switch = powernv_fast_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) .get = powernv_cpufreq_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) .stop_cpu = powernv_cpufreq_stop_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) .attr = powernv_cpu_freq_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static int init_chip_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) unsigned int *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) unsigned int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) unsigned int prev_chip_id = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cpumask_t *chip_cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (!chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Allocate a chip cpu mask large enough to fit mask for all chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!chip_cpu_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto free_and_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) unsigned int id = cpu_to_chip_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (prev_chip_id != id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) prev_chip_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) chip[nr_chips++] = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (!chips) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) goto out_free_chip_cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) for (i = 0; i < nr_chips; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) chips[i].id = chip[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) for_each_cpu(cpu, &chips[i].mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) per_cpu(chip_info, cpu) = &chips[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) out_free_chip_cpu_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) kfree(chip_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) free_and_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) kfree(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static inline void clean_chip_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* flush any pending work items */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (chips)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) for (i = 0; i < nr_chips; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) cancel_work_sync(&chips[i].throttle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) kfree(chips);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static inline void unregister_all_notifiers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) opal_message_notifier_unregister(OPAL_MSG_OCC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) &powernv_cpufreq_opal_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int __init powernv_cpufreq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Don't probe on pseries (guest) platforms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!firmware_has_feature(FW_FEATURE_OPAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* Discover pstates from device tree and init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) rc = init_powernv_pstates();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Populate chip info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) rc = init_chip_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (powernv_pstate_info.wof_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) powernv_cpufreq_driver.boost_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) rc = cpufreq_register_driver(&powernv_cpufreq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) pr_info("Failed to register the cpufreq driver (%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (powernv_pstate_info.wof_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) cpufreq_enable_boost_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) register_reboot_notifier(&powernv_cpufreq_reboot_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) clean_chip_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) pr_info("Platform driver disabled. System does not support PState control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) module_init(powernv_cpufreq_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static void __exit powernv_cpufreq_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) cpufreq_unregister_driver(&powernv_cpufreq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) unregister_all_notifiers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) clean_chip_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) module_exit(powernv_cpufreq_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");