// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

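/*
 * Each LUT row holds a frequency word and a voltage word. The frequency
 * word encodes the clock source select (LUT_SRC), the XO multiplier
 * (LUT_L_VAL) and a core count whose LUT_TURBO_IND value flags a boost
 * entry (LUT_CORE_COUNT); the voltage word carries the level in mV
 * (LUT_VOLT). The row stride is SoC-specific (lut_row_size).
 */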
#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;
	struct resource *res;
	const struct qcom_cpufreq_soc_data *soc_data;
};

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

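/*
 * Map the requested frequency to its OPP and vote for the interconnect
 * bandwidth associated with that OPP.
 */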
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_bw(dev, opp);
	dev_pm_opp_put(opp);
	return ret;
}

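/*
 * Without a DT OPP table (and hence no interconnect scaling) each LUT entry
 * is added as a dynamic OPP. Otherwise the DT-provided OPP is adjusted to
 * the voltage reported by the hardware LUT and re-enabled.
 */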
static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

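/*
 * Switches are requested by writing the LUT index to the per-domain
 * perf_state register; the hardware carries out the actual transition.
 * Interconnect bandwidth is scaled alongside when enabled.
 */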
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned long freq = policy->freq_table[index].frequency;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

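/*
 * Read back the current perf state index from the hardware and translate
 * it into a frequency, clamped to the LUT size.
 */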
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

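/*
 * Fast switching writes the governor's cached_resolved_idx directly; it is
 * only advertised when no DT OPP table (and hence no bandwidth voting) is
 * in use.
 */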
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned int index;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	return policy->freq_table[index].frequency;
}

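/*
 * Walk the hardware LUT and build the cpufreq frequency table. A repeated
 * frequency marks the end of the LUT, and an entry with a core count of
 * LUT_TURBO_IND is recorded as a boost frequency.
 */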
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}

		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * A repeated frequency means we have reached the end of the
		 * table.
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

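/*
 * A CPU belongs to this frequency domain if the first cell of its
 * qcom,freq-domain phandle matches the domain index being initialised.
 */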
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

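/*
 * Per-SoC register layout: the original qcom,cpufreq-hw block packs each
 * LUT row into 32 bytes, while the EPSS variant keeps its frequency and
 * voltage tables at a 4-byte stride.
 */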
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

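/*
 * Per-policy init: resolve the CPU's frequency domain index from its
 * qcom,freq-domain property, map the matching MMIO region of the platform
 * device, check the enable bit and build the frequency table from the LUT.
 */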
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res) {
		dev_err(dev, "failed to get mem resource %d\n", index);
		return -ENODEV;
	}

	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "failed to request resource %pR\n", res);
		return -EBUSY;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(dev, "failed to map resource %pR\n", res);
		ret = -ENOMEM;
		goto release_region;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto unmap_base;
	}

	data->soc_data = of_device_get_match_data(&pdev->dev);
	data->base = base;
	data->res = res;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	qcom_get_related_cpus(index, policy->cpus);
	if (cpumask_empty(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	return 0;
error:
	kfree(data);
unmap_base:
	iounmap(base);
release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

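/*
 * Undo cpu_init: drop the OPPs, free the frequency table and release the
 * MMIO region.
 */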
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = qcom_cpufreq_hw_target_index,
	.get = qcom_cpufreq_hw_get,
	.init = qcom_cpufreq_hw_cpu_init,
	.exit = qcom_cpufreq_hw_cpu_exit,
	.fast_switch = qcom_cpufreq_hw_fast_switch,
	.name = "qcom-cpufreq-hw",
	.attr = qcom_cpufreq_hw_attr,
};

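/*
 * Cache the XO and alternate clock rates used to decode LUT entries (the
 * alternate source runs through a fixed CLK_HW_DIV divider) before
 * registering the cpufreq driver.
 */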
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");