^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Rockchip Generic dmc support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Finley Xiao <finley.xiao@rock-chips.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <dt-bindings/clock/rockchip-ddr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <dt-bindings/soc/rockchip-system-status.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <drm/drm_modeset_lock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/arm-smccc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/devfreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/devfreq_cooling.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/devfreq-event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/input.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/pm_opp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/pm_qos.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/rockchip/rockchip_sip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/thermal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <soc/rockchip/pm_domains.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <soc/rockchip/rkfb_dmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <soc/rockchip/rockchip_dmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <soc/rockchip/rockchip_sip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <soc/rockchip/rockchip_system_monitor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <soc/rockchip/rockchip-system-status.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <soc/rockchip/rockchip_opp_select.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <soc/rockchip/scpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <uapi/drm/drm_mode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "governor.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "rockchip_dmc_timing.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include "../clk/rockchip/clk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "../gpu/drm/rockchip/rockchip_drm_drv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include "../opp/opp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define system_status_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) status_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define reboot_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) reboot_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define boost_to_dmcfreq(work) container_of(work, struct rockchip_dmcfreq, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) boost_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define input_hd_to_dmcfreq(hd) container_of(hd, struct rockchip_dmcfreq, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) input_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define VIDEO_1080P_SIZE (1920 * 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define FIQ_INIT_HANDLER (0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define FIQ_CPU_TGT_BOOT (0x0) /* to booting cpu */
#define FIQ_NUM_FOR_DCF		(143)	/* NA IRQ mapped to FIQ for DCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define DTS_PAR_OFFSET (4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define FALLBACK_STATIC_TEMPERATURE 55000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct dmc_freq_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) unsigned long freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) unsigned long volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
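/*
 * Parameters exchanged with the trusted firmware over the DDR SIP interface
 * (see rockchip_ddr_set_rate() below): the kernel fills in the target rate,
 * screen type and handshake flags before issuing
 * ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE.
 */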
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct share_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) u32 hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) u32 lcdc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) u32 vop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) u32 vop_dclk_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) u32 sr_idle_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) u32 addr_mcu_el3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * 1: need to wait flag1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * 0: never wait flag1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) u32 wait_flag1;
	/*
	 * 1: need to wait flag0
	 * 0: never wait flag0
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) u32 wait_flag0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) u32 complt_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u32 update_drv_odt_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u32 update_deskew_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) u32 freq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) u32 freq_info_mhz[6];
	/* if needed, add new parameters after this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static struct share_params *ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct rockchip_dmcfreq_ondemand_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) unsigned int upthreshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) unsigned int downdifferential;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
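/* Driver state for one Rockchip DMC devfreq instance. */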
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct rockchip_dmcfreq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct dmcfreq_common_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct rockchip_dmcfreq_ondemand_data ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct clk *dmc_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct devfreq_event_dev **edev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct mutex lock; /* serializes access to video_info_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct dram_timing *timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct regulator *vdd_center;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct regulator *mem_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct notifier_block status_nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct notifier_block panic_nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct list_head video_info_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct freq_map_table *cpu_bw_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct work_struct boost_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct input_handler input_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct monitor_dev_info *mdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct share_params *set_rate_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) unsigned long *nocp_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) unsigned long volt, mem_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long sleep_volt, sleep_mem_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) unsigned long auto_min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) unsigned long status_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) unsigned long normal_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) unsigned long video_1080p_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) unsigned long video_4k_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) unsigned long video_4k_10b_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) unsigned long video_svep_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) unsigned long performance_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) unsigned long hdmi_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) unsigned long hdmirx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) unsigned long idle_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) unsigned long suspend_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) unsigned long reboot_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) unsigned long boost_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) unsigned long fixed_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) unsigned long low_power_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned long freq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) unsigned long freq_info_rate[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) unsigned long rate_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) unsigned long rate_mid_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned long rate_mid_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) unsigned long rate_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) unsigned int min_cpu_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) unsigned int system_status_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) unsigned int refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int edev_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) int dfi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) int nocp_cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) int regulator_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) bool is_fixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) bool is_set_rate_direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct thermal_cooling_device *devfreq_cooling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) u32 static_coefficient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) s32 ts[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) struct thermal_zone_device *ddr_tz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) unsigned int touchboostpulse_duration_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) u64 touchboostpulse_endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int (*set_auto_self_refresh)(u32 en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static struct pm_qos_request pm_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static struct monitor_dev_profile dmc_mdevp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) .type = MONITOR_TPYE_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) .low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) .high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) .update_volt = rockchip_monitor_check_rate_volt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) .set_opp = rockchip_dmcfreq_opp_helper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) static inline unsigned long is_dualview(unsigned long status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return (status & SYS_STATUS_LCDC0) && (status & SYS_STATUS_LCDC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static inline unsigned long is_isp(unsigned long status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return (status & SYS_STATUS_ISP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) (status & SYS_STATUS_CIF0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) (status & SYS_STATUS_CIF1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
/*
 * Pack the de-skew settings into px30_ddr_dts_config_timing. The structure
 * is passed to the trusted firmware, which uses it directly to program the
 * de-skew registers.
 * input: de_skew
 * output: tim
 */
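/*
 * Worked example of the packing below: each de-skew value is a 4-bit field,
 * two per output byte. An even source index goes to the high nibble (shift 4)
 * and an odd index to the low nibble (shift 0), so ca_de_skew[0] lands in
 * ca_skew[0] bits [7:4] and ca_de_skew[1] in ca_skew[0] bits [3:0]. Within
 * each 21-entry data group, the last entry (index 20) always uses the low
 * nibble of its own byte.
 */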
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) static void px30_de_skew_set_2_reg(struct rk3328_ddr_de_skew_setting *de_skew,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) struct px30_ddr_dts_config_timing *tim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) u32 shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* CA de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) offset = n / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) shift = n % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) tim->ca_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* CS0 data de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) offset = ((n / 21) * 11) + ((n % 21) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) shift = ((n % 21) % 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if ((n % 21) == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) tim->cs0_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) /* CS1 data de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) offset = ((n / 21) * 11) + ((n % 21) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) shift = ((n % 21) % 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if ((n % 21) == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) tim->cs1_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
/*
 * Pack the de-skew settings into rk3328_ddr_dts_config_timing. The structure
 * is passed to the trusted firmware, which uses it directly to program the
 * de-skew registers.
 * input: de_skew
 * output: tim
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) rk3328_de_skew_setting_2_register(struct rk3328_ddr_de_skew_setting *de_skew,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) struct rk3328_ddr_dts_config_timing *tim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) u32 shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) /* CA de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) offset = n / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) shift = n % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) tim->ca_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /* CS0 data de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) offset = ((n / 21) * 11) + ((n % 21) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) shift = ((n % 21) % 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) if ((n % 21) == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) tim->cs0_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /* CS1 data de-skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) offset = ((n / 21) * 11) + ((n % 21) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) shift = ((n % 21) % 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if ((n % 21) == 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) /* 0 => 4; 1 => 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) shift = (shift == 0) ? 4 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) tim->cs1_skew[offset] &= ~(0xf << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static int rk_drm_get_lcdc_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) u32 lcdc_type = rockchip_drm_get_sub_dev_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) switch (lcdc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) case DRM_MODE_CONNECTOR_DPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) case DRM_MODE_CONNECTOR_LVDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) lcdc_type = SCREEN_LVDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) case DRM_MODE_CONNECTOR_DisplayPort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) lcdc_type = SCREEN_DP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) case DRM_MODE_CONNECTOR_HDMIA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) case DRM_MODE_CONNECTOR_HDMIB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) lcdc_type = SCREEN_HDMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) case DRM_MODE_CONNECTOR_TV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) lcdc_type = SCREEN_TVOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) case DRM_MODE_CONNECTOR_eDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) lcdc_type = SCREEN_EDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) case DRM_MODE_CONNECTOR_DSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) lcdc_type = SCREEN_MIPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) lcdc_type = SCREEN_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return lcdc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
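/*
 * Ask the trusted firmware to switch the DDR clock to target_rate through the
 * DRAM SIP call. If the firmware reports SIP_RET_SET_RATE_TIMEOUT, wait for
 * completion via rockchip_dmcfreq_wait_complete() before returning the SIP
 * result.
 */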
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static int rockchip_ddr_set_rate(unsigned long target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) ddr_psci_param->hz = target_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) ddr_psci_param->wait_flag1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) ddr_psci_param->wait_flag0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) rockchip_dmcfreq_wait_complete();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return res.a0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
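/*
 * Program one regulator towards the voltage described by an OPP supply
 * (minimum and target taken from the supply, maximum relaxed to INT_MAX);
 * reg_name is only used in log messages.
 */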
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static int rockchip_dmcfreq_set_volt(struct device *dev, struct regulator *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct dev_pm_opp_supply *supply,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) char *reg_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
	dev_dbg(dev, "%s: %s voltages (uV): %lu %lu %lu\n", __func__, reg_name,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) supply->u_volt, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
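/*
 * Custom set_opp helper (also registered through dmc_mdevp). The sequence is:
 *  1. Disable CPU hotplug and, if min_cpu_freq is set, temporarily pin the
 *     local CPU at or above that frequency so the rate switch can complete
 *     in time (see the vblank comment below).
 *  2. When scaling up, raise the mem/vdd supplies before the clock.
 *  3. Switch the DDR rate through the direct SIP path or clk_set_rate().
 *  4. When scaling down, lower the supplies after the clock.
 * On failure the frequency and/or voltages are rolled back.
 */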
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct regulator *vdd_reg = data->regulators[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) struct dev_pm_opp_supply *old_supply_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) struct dev_pm_opp_supply *new_supply_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) struct regulator *mem_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct device *dev = data->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct clk *clk = data->clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct cpufreq_policy *policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) unsigned long old_freq = data->old_opp.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) unsigned long freq = data->new_opp.rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) unsigned int reg_count = data->regulator_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) bool is_cpufreq_changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) unsigned int cpu_cur, cpufreq_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (reg_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) old_supply_mem = &data->old_opp.supplies[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) new_supply_mem = &data->new_opp.supplies[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mem_reg = data->regulators[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
	/*
	 * We need to prevent CPU hotplug from happening while a DMC frequency
	 * change is in progress.
	 *
	 * Do this before taking the policy rwsem to avoid deadlocks with the
	 * mutex that is locked/unlocked in cpu_hotplug_disable/enable. It
	 * also avoids deadlocks with the lock taken in cpus_read_lock/unlock
	 * (such as in store_scaling_max_freq()).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (dmcfreq->min_cpu_freq) {
		/*
		 * Go to the specified cpufreq and block other cpufreq changes,
		 * since set_rate needs to complete during vblank.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) cpu_cur = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) policy = cpufreq_cpu_get(cpu_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (!policy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) dev_err(dev, "cpu%d policy NULL\n", cpu_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) goto cpufreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) down_write(&policy->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) cpufreq_cur = cpufreq_quick_get(cpu_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
		/* If we're thermally throttled, don't change the CPU frequency. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (cpufreq_cur < dmcfreq->min_cpu_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (policy->max >= dmcfreq->min_cpu_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) __cpufreq_driver_target(policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) dmcfreq->min_cpu_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) CPUFREQ_RELATION_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) is_cpufreq_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) dev_dbg(dev,
					"CPU may be too slow for DMC (%d kHz)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) policy->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* Scaling up? Scale voltage before frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (freq >= old_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (reg_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) new_supply_mem, "mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) goto restore_voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) "vdd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) goto restore_voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (freq == old_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
	/*
	 * A writer on the rwsem may block readers even while it is still
	 * waiting in the queue, and this may lead to a deadlock when a code
	 * path takes the read sem twice (e.g. once in vop_lock() and again in
	 * rockchip_pmu_lock()). As a (suboptimal) workaround, let the writer
	 * spin until it gets the lock.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) while (!rockchip_dmcfreq_write_trylock())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) dev_dbg(dev, "%lu Hz --> %lu Hz\n", old_freq, freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) if (dmcfreq->set_rate_params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) dmcfreq->set_rate_params->lcdc_type = rk_drm_get_lcdc_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) dmcfreq->set_rate_params->wait_flag1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) dmcfreq->set_rate_params->wait_flag0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (dmcfreq->is_set_rate_direct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) ret = rockchip_ddr_set_rate(freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) ret = clk_set_rate(clk, freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) rockchip_dmcfreq_write_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) goto restore_voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
	/*
	 * Check the DPLL rate. There are only two possible outcomes:
	 * 1. DDR frequency scaling failed: we still read back the old rate.
	 * 2. DDR frequency scaling succeeded: we read back the rate we set.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) dmcfreq->rate = clk_get_rate(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
	/* If we read back the wrong rate, restore the old voltage. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (dmcfreq->rate != freq) {
		dev_err(dev, "Got wrong frequency: requested %lu, current %lu\n",
			freq, dmcfreq->rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) goto restore_voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /* Scaling down? Scale voltage after frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (freq < old_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) "vdd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) goto restore_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) if (reg_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) new_supply_mem, "mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) goto restore_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) dmcfreq->volt = new_supply_vdd->u_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (reg_count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dmcfreq->mem_volt = new_supply_mem->u_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) restore_freq:
	if (dmcfreq->is_set_rate_direct)
		ret = rockchip_ddr_set_rate(old_freq);
	else
		ret = clk_set_rate(clk, old_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) __func__, old_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) restore_voltage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (reg_count > 1 && old_supply_mem->u_volt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) rockchip_dmcfreq_set_volt(dev, mem_reg, old_supply_mem, "mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (old_supply_vdd->u_volt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) rockchip_dmcfreq_set_volt(dev, vdd_reg, old_supply_vdd, "vdd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (dmcfreq->min_cpu_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (is_cpufreq_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) __cpufreq_driver_target(policy, cpufreq_cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) CPUFREQ_RELATION_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) up_write(&policy->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) cpufreq_cpu_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) cpufreq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
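/*
 * devfreq .target callback: pick the recommended OPP for the requested rate,
 * then let dev_pm_opp_set_rate() (presumably ending up in the helper above)
 * perform the transition under the system monitor's volt-adjust lock.
 */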
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static int rockchip_dmcfreq_target(struct device *dev, unsigned long *freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct devfreq *devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (!dmc_mdevp.is_checked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) opp = devfreq_recommended_opp(dev, freq, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (IS_ERR(opp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return PTR_ERR(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) dev_pm_opp_put(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) rockchip_monitor_volt_adjust_lock(dmcfreq->mdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) ret = dev_pm_opp_set_rate(dev, *freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (dmcfreq->info.devfreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) devfreq = dmcfreq->info.devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) devfreq->last_status.current_frequency = *freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) rockchip_monitor_volt_adjust_unlock(dmcfreq->mdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
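/*
 * devfreq .get_dev_status callback: the DFI event device supplies the
 * busy/total counters, while the remaining event devices (NoC probes,
 * presumably) only refresh the bandwidth snapshot in nocp_bw[].
 */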
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static int rockchip_dmcfreq_get_dev_status(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) struct devfreq_dev_status *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) struct devfreq_event_data edata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (!dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) for (i = 0; i < dmcfreq->edev_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) ret = devfreq_event_get_event(dmcfreq->edev[i], &edata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) dev_err(dev, "failed to get event %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) dmcfreq->edev[i]->desc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (i == dmcfreq->dfi_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) stat->busy_time = edata.load_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) stat->total_time = edata.total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) dmcfreq->nocp_bw[i] = edata.load_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static int rockchip_dmcfreq_get_cur_freq(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) unsigned long *freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) *freq = dmcfreq->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) static struct devfreq_dev_profile rockchip_devfreq_dmc_profile = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) .polling_ms = 50,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) .target = rockchip_dmcfreq_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) .get_dev_status = rockchip_dmcfreq_get_dev_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) .get_cur_freq = rockchip_dmcfreq_get_cur_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) static inline void reset_last_status(struct devfreq *devfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) devfreq->last_status.total_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) devfreq->last_status.busy_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
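/*
 * The of_get_*_timings() helpers below share one pattern: look up the DT
 * timing phandle ("ddr_timing", "rockchip,ddr_timing" or
 * "rockchip,dram_timing"), read each named u32 property into the parameter
 * block at DTS_PAR_OFFSET, and flag the block as available only when every
 * property was found (the rk3228 variant instead returns the combined
 * result).
 */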
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static void of_get_px30_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct px30_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) struct rk3328_ddr_de_skew_setting *de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) (struct px30_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (!de_skew) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) p = (u32 *)dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) p = (u32 *)de_skew->ca_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) p = (u32 *)de_skew->cs0_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) p = (u32 *)de_skew->cs1_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) px30_de_skew_set_2_reg(de_skew, dts_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) kfree(de_skew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) dts_timing->available = 0;
		dev_err(dev, "%s: failed to get ddr timings\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static void of_get_rk1808_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct rk1808_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) (struct rk1808_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) p = (u32 *)dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) p = (u32 *)dts_timing->ca_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) for (i = 0; i < ARRAY_SIZE(rk1808_dts_ca_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ret |= of_property_read_u32(np_tim, rk1808_dts_ca_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) p = (u32 *)dts_timing->cs0_a_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_a_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_a_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) p = (u32 *)dts_timing->cs0_b_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_b_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_b_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) p = (u32 *)dts_timing->cs1_a_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_a_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_a_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) p = (u32 *)dts_timing->cs1_b_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_b_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_b_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) dts_timing->available = 0;
		dev_err(dev, "%s: failed to get ddr timings\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static void of_get_rk3128_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct rk3128_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct share_params *init_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) init_timing = (struct share_params *)timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (of_property_read_u32(np, "vop-dclk-mode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) &init_timing->vop_dclk_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) init_timing->vop_dclk_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) p = timing + DTS_PAR_OFFSET / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) for (i = 0; i < ARRAY_SIZE(rk3128_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ret |= of_property_read_u32(np_tim, rk3128_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) (struct rk3128_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dts_timing->available = 0;
		dev_err(dev, "%s: failed to get ddr timings\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static uint32_t of_get_rk3228_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) p = timing + DTS_PAR_OFFSET / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) np_tim = of_parse_phandle(np, "rockchip,dram_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) for (i = 0; i < ARRAY_SIZE(rk3228_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ret |= of_property_read_u32(np_tim, rk3228_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) dev_err(dev, "of_get_ddr_timings: fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static void of_get_rk3288_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct rk3288_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct share_params *init_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) init_timing = (struct share_params *)timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (of_property_read_u32(np, "vop-dclk-mode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) &init_timing->vop_dclk_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) init_timing->vop_dclk_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) p = timing + DTS_PAR_OFFSET / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) for (i = 0; i < ARRAY_SIZE(rk3288_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ret |= of_property_read_u32(np_tim, rk3288_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) (struct rk3288_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dts_timing->available = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dev_err(dev, "of_get_ddr_timings: fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
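/*
 * The RK3328 additionally carries CA/CS0/CS1 de-skew values in the DT.
 * They are read into a temporary rk3328_ddr_de_skew_setting and then packed
 * into the register layout expected by the firmware via
 * rk3328_de_skew_setting_2_register().
 */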
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void of_get_rk3328_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct rk3328_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct rk3328_ddr_de_skew_setting *de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) (struct rk3328_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!de_skew) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) p = (u32 *)dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) for (i = 0; i < ARRAY_SIZE(rk3328_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ret |= of_property_read_u32(np_tim, rk3328_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) p = (u32 *)de_skew->ca_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) p = (u32 *)de_skew->cs0_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) p = (u32 *)de_skew->cs1_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) rk3328_de_skew_setting_2_register(de_skew, dts_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) kfree(de_skew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dts_timing->available = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev_err(dev, "of_get_ddr_timings: fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
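/*
 * RV1126 reuses the rk1808 timing layout: the common px30 timings are read
 * first, followed by the per-group (CA, CS0 A/B, CS1 A/B) de-skew values,
 * all written directly into the shared page.
 */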
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static void of_get_rv1126_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct device_node *np, uint32_t *timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct rk1808_ddr_dts_config_timing *dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dts_timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) (struct rk1808_ddr_dts_config_timing *)(timing +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) DTS_PAR_OFFSET / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) p = (u32 *)dts_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ret |= of_property_read_u32(np_tim, px30_dts_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) p = (u32 *)dts_timing->ca_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) for (i = 0; i < ARRAY_SIZE(rv1126_dts_ca_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ret |= of_property_read_u32(np_tim, rv1126_dts_ca_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) p = (u32 *)dts_timing->cs0_a_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_a_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_a_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) p = (u32 *)dts_timing->cs0_b_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_b_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_b_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) p = (u32 *)dts_timing->cs1_a_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_a_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_a_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) p = (u32 *)dts_timing->cs1_b_de_skew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_b_timing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_b_timing[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) dts_timing->available = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) dts_timing->available = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dev_err(dev, "of_get_ddr_timings: fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
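/*
 * The RK3368 timings are returned in a devm-allocated structure instead of
 * the shared page; NULL is returned if the "ddr_timing" node is missing or
 * any of its properties cannot be read.
 */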
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static struct rk3368_dram_timing *of_get_rk3368_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct rk3368_dram_timing *timing = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ret |= of_property_read_u32(np_tim, "dram_spd_bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) &timing->dram_spd_bin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ret |= of_property_read_u32(np_tim, "sr_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) &timing->sr_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret |= of_property_read_u32(np_tim, "pd_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) &timing->pd_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ret |= of_property_read_u32(np_tim, "dram_dll_disb_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) &timing->dram_dll_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ret |= of_property_read_u32(np_tim, "phy_dll_disb_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) &timing->phy_dll_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret |= of_property_read_u32(np_tim, "dram_odt_disb_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) &timing->dram_odt_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret |= of_property_read_u32(np_tim, "phy_odt_disb_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) &timing->phy_odt_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ret |= of_property_read_u32(np_tim, "ddr3_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) &timing->ddr3_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ret |= of_property_read_u32(np_tim, "ddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) &timing->ddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ret |= of_property_read_u32(np_tim, "lpddr3_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) &timing->lpddr3_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret |= of_property_read_u32(np_tim, "lpddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) &timing->lpddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ret |= of_property_read_u32(np_tim, "lpddr2_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) &timing->lpddr2_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ret |= of_property_read_u32(np_tim, "phy_clk_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) &timing->phy_clk_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ret |= of_property_read_u32(np_tim, "phy_cmd_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) &timing->phy_cmd_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret |= of_property_read_u32(np_tim, "phy_dqs_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) &timing->phy_dqs_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret |= of_property_read_u32(np_tim, "phy_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) &timing->phy_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret |= of_property_read_u32(np_tim, "ddr_2t",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) &timing->ddr_2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) devm_kfree(dev, timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) timing = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static struct rk3399_dram_timing *of_get_rk3399_timings(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct rk3399_dram_timing *timing = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct device_node *np_tim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) np_tim = of_parse_phandle(np, "ddr_timing", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (np_tim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ret = of_property_read_u32(np_tim, "ddr3_speed_bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) &timing->ddr3_speed_bin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ret |= of_property_read_u32(np_tim, "pd_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) &timing->pd_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret |= of_property_read_u32(np_tim, "sr_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) &timing->sr_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ret |= of_property_read_u32(np_tim, "sr_mc_gate_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) &timing->sr_mc_gate_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ret |= of_property_read_u32(np_tim, "srpd_lite_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) &timing->srpd_lite_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ret |= of_property_read_u32(np_tim, "standby_idle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) &timing->standby_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ret |= of_property_read_u32(np_tim, "auto_lp_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) &timing->auto_lp_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ret |= of_property_read_u32(np_tim, "ddr3_dll_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) &timing->ddr3_dll_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ret |= of_property_read_u32(np_tim, "phy_dll_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) &timing->phy_dll_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ret |= of_property_read_u32(np_tim, "ddr3_odt_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) &timing->ddr3_odt_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ret |= of_property_read_u32(np_tim, "ddr3_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) &timing->ddr3_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ret |= of_property_read_u32(np_tim, "ddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) &timing->ddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ret |= of_property_read_u32(np_tim, "phy_ddr3_ca_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) &timing->phy_ddr3_ca_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ret |= of_property_read_u32(np_tim, "phy_ddr3_dq_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) &timing->phy_ddr3_dq_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ret |= of_property_read_u32(np_tim, "phy_ddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) &timing->phy_ddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ret |= of_property_read_u32(np_tim, "lpddr3_odt_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) &timing->lpddr3_odt_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ret |= of_property_read_u32(np_tim, "lpddr3_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) &timing->lpddr3_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ret |= of_property_read_u32(np_tim, "lpddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) &timing->lpddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ret |= of_property_read_u32(np_tim, "phy_lpddr3_ca_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) &timing->phy_lpddr3_ca_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret |= of_property_read_u32(np_tim, "phy_lpddr3_dq_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) &timing->phy_lpddr3_dq_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ret |= of_property_read_u32(np_tim, "phy_lpddr3_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) &timing->phy_lpddr3_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret |= of_property_read_u32(np_tim, "lpddr4_odt_dis_freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) &timing->lpddr4_odt_dis_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret |= of_property_read_u32(np_tim, "lpddr4_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) &timing->lpddr4_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ret |= of_property_read_u32(np_tim, "lpddr4_dq_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) &timing->lpddr4_dq_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ret |= of_property_read_u32(np_tim, "lpddr4_ca_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) &timing->lpddr4_ca_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ret |= of_property_read_u32(np_tim, "phy_lpddr4_ca_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) &timing->phy_lpddr4_ca_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ret |= of_property_read_u32(np_tim, "phy_lpddr4_ck_cs_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) &timing->phy_lpddr4_ck_cs_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ret |= of_property_read_u32(np_tim, "phy_lpddr4_dq_drv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) &timing->phy_lpddr4_dq_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ret |= of_property_read_u32(np_tim, "phy_lpddr4_odt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) &timing->phy_lpddr4_odt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) devm_kfree(dev, timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) timing = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) of_node_put(np_tim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
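/*
 * Store the auto self-refresh enable flag in the shared page and ask the
 * trusted firmware to apply it.
 */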
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int rockchip_ddr_set_auto_self_refresh(uint32_t en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ddr_psci_param->sr_idle_en = en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ROCKCHIP_SIP_CONFIG_DRAM_SET_AT_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return res.a0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
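/*
 * State for waiting on the "frequency change complete" interrupt:
 * complt_irq/wait_flag/wait_wq implement the wait itself, wait_time_out_ms
 * bounds it, and dcf_en selects how the change is kicked off (1: DCF via
 * regmap_dcf, 2: MCU via a SIP call).
 */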
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct dmcfreq_wait_ctrl_t {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) wait_queue_head_t wait_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int wait_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int wait_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int wait_time_out_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int dcf_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct regmap *regmap_dcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static struct dmcfreq_wait_ctrl_t wait_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static irqreturn_t wait_complete_irq(int irqno, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct dmcfreq_wait_ctrl_t *ctrl = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ctrl->wait_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) wake_up(&ctrl->wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static irqreturn_t wait_dcf_complete_irq(int irqno, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct dmcfreq_wait_ctrl_t *ctrl = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (res.a0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ctrl->wait_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) wake_up(&ctrl->wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
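/*
 * Called while the DDR frequency is being changed: enable the completion
 * IRQ, pin the CPU wakeup latency to 0, start the DCF or MCU if configured,
 * then wait (with a timeout) for the IRQ handler to clear wait_flag before
 * restoring the PM QoS request and disabling the IRQ again.
 */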
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int rockchip_dmcfreq_wait_complete(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!wait_ctrl.wait_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) pr_err("%s: waiting for completion is not supported!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) wait_ctrl.wait_flag = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) enable_irq(wait_ctrl.complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * Allow the CPUs to enter only WFI when idle so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * FIQ can be serviced quickly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) cpu_latency_qos_update_request(&pm_qos, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (wait_ctrl.dcf_en == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* start dcf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) regmap_update_bits(wait_ctrl.regmap_dcf, 0x0, 0x1, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) } else if (wait_ctrl.dcf_en == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_MCU_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) pr_err("rockchip_sip_config_mcu_start error:%lx\n", res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) wait_event_timeout(wait_ctrl.wait_wq, (wait_ctrl.wait_flag == 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) msecs_to_jiffies(wait_ctrl.wait_time_out_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * If waiting for wait_ctrl.complt_irq times out, clear the IRQ and stop the MCU by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * sip_smc_dram(DRAM_POST_SET_RATE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (wait_ctrl.dcf_en == 2 && wait_ctrl.wait_flag != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (res.a0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) cpu_latency_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) disable_irq(wait_ctrl.complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
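/*
 * Query the trusted firmware for the DDR frequencies it supports and
 * reconcile the OPP table with that list: OPPs that do not match a
 * supported rate are disabled, and supported rates missing from the table
 * are added with the voltage of the next higher existing OPP.
 */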
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static __maybe_unused int rockchip_get_freq_info(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct dmc_freq_table *freq_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int i, j, count, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dev_err(dmcfreq->dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dev_err(dmcfreq->dev, "there are no available frequencies!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) for (i = 0; i < ddr_psci_param->freq_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dmcfreq->freq_count = ddr_psci_param->freq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* update dmc_opp_table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) count = dev_pm_opp_get_opp_count(dmcfreq->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (count <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ret = count ? count : -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) freq_table = kmalloc_array(count, sizeof(*freq_table), GFP_KERNEL);
if (!freq_table)
return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) for (i = 0, rate = 0; i < count; i++, rate++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* find next rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) opp = dev_pm_opp_find_freq_ceil(dmcfreq->dev, &rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (IS_ERR(opp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) ret = PTR_ERR(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dev_err(dmcfreq->dev, "failed to find OPP for freq %lu.\n", rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) freq_table[i].freq = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) freq_table[i].volt = dev_pm_opp_get_voltage(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dev_pm_opp_put(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) for (j = 0; j < dmcfreq->freq_count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (rate == dmcfreq->freq_info_rate[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (j == dmcfreq->freq_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dev_pm_opp_disable(dmcfreq->dev, rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) for (i = 0; i < dmcfreq->freq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) for (j = 0; j < count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (dmcfreq->freq_info_rate[i] == freq_table[j].freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) } else if (dmcfreq->freq_info_rate[i] < freq_table[j].freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) dev_pm_opp_add(dmcfreq->dev, dmcfreq->freq_info_rate[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) freq_table[j].volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (j == count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dev_err(dmcfreq->dev, "failed to match dmc_opp_table for %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) dmcfreq->freq_info_rate[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) dmcfreq->freq_count = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) kfree(freq_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
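/*
 * Alternative to rockchip_get_freq_info(): instead of adding new OPPs, walk
 * the existing table and snap every OPP rate down to the nearest frequency
 * the firmware supports, disabling OPPs that have no such floor or that
 * would duplicate the previous rate.
 */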
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static __maybe_unused int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) rockchip_dmcfreq_adjust_opp_table(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct opp_table *opp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned long target_rate = 0, last_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int i, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dev_err(dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) dev_err(dev, "there are no available frequencies!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) for (i = 0; i < ddr_psci_param->freq_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) dmcfreq->freq_count = ddr_psci_param->freq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) opp_table = dev_pm_opp_get_opp_table(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!opp_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) mutex_lock(&opp_table->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) list_for_each_entry(opp, &opp_table->opp_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (!opp->available)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* Search for a rounded floor frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) target_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) for (i = 0; i < dmcfreq->freq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (dmcfreq->freq_info_rate[i] <= opp->rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) target_rate = dmcfreq->freq_info_rate[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* If no supported floor frequency was found, disable the OPP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (!target_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) opp->available = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* If the rounded rate equals the previous OPP's rate, disable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (target_rate == last_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) opp->available = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) opp->rate = target_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) last_rate = opp->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mutex_unlock(&opp_table->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) dev_pm_opp_put_opp_table(opp_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) dev_err(dev, "there is no available opp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
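/*
 * Per-SoC init: check the ATF version, request the shared memory pages,
 * copy the DT timings into them, set up the "complete_irq" that signals the
 * end of a rate change (its hwirq number is handed to the firmware), and
 * finally let the firmware initialise its DRAM frequency-scaling support.
 */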
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static __maybe_unused int px30_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) u32 complt_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct irq_data *complt_irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) res = sip_smc_dram(0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (res.a0 || res.a1 < 0x103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) "trusted firmware needs to be updated or is invalid!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * the first 4KB is used for the interface parameters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * the following 4KB * N hold the DTS parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) size = sizeof(struct px30_ddr_dts_config_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) of_get_px30_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) init_waitqueue_head(&wait_ctrl.wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) wait_ctrl.wait_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) wait_ctrl.wait_time_out_ms = 17 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) complt_irq = platform_get_irq_byname(pdev, "complete_irq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (complt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) wait_ctrl.complt_irq = complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = devm_request_irq(&pdev->dev, complt_irq, wait_complete_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 0, dev_name(&pdev->dev), &wait_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dev_err(&pdev->dev, "cannot request complete_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) disable_irq(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) complt_irq_data = irq_get_irq_data(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) complt_hwirq = irqd_to_hwirq(complt_irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ddr_psci_param->complt_hwirq = complt_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
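/*
 * Same flow as px30_dmc_init(), except that the rate change is driven by
 * the DCF: its control registers are reached through the "dcf_reg" syscon
 * so the kernel can start it from rockchip_dmcfreq_wait_complete().
 */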
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static __maybe_unused int rk1808_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) res = sip_smc_dram(0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (res.a0 || res.a1 < 0x101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) "trusted firmware needs to be updated or is invalid!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * the first 4KB is used for the interface parameters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * the following 4KB * N hold the DTS parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) size = sizeof(struct rk1808_ddr_dts_config_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) of_get_rk1808_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /* map the DCF control registers so the kernel can start the DCF once it is ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) node = of_parse_phandle(pdev->dev.of_node, "dcf_reg", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (IS_ERR(wait_ctrl.regmap_dcf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return PTR_ERR(wait_ctrl.regmap_dcf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) wait_ctrl.dcf_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) init_waitqueue_head(&wait_ctrl.wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) wait_ctrl.wait_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) wait_ctrl.wait_time_out_ms = 17 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) complt_irq = platform_get_irq_byname(pdev, "complete_irq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (complt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) wait_ctrl.complt_irq = complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 0, dev_name(&pdev->dev), &wait_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dev_err(&pdev->dev, "cannot request complete_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) disable_irq(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
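/*
 * RK3128 uses no completion interrupt: only the parsed timings, hz = 0 and
 * the current LCDC type are handed to the firmware before DRAM_INIT.
 */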
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static __maybe_unused int rk3128_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) res = sip_smc_request_share_mem(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) DIV_ROUND_UP(sizeof(struct rk3128_ddr_dts_config_timing), 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) of_get_rk3128_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) ddr_psci_param->hz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static __maybe_unused int rk3228_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) res = sip_smc_request_share_mem(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) DIV_ROUND_UP(sizeof(struct rk3228_ddr_dts_config_timing), 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (of_get_rk3228_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) (uint32_t *)ddr_psci_param))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) ddr_psci_param->hz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static __maybe_unused int rk3288_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct clk *pclk_phy, *pclk_upctl, *dmc_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
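	/* Enable the DMC clock and the PHY/controller pclks for both channels. */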
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) dmc_clk = devm_clk_get(dev, "dmc_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (IS_ERR(dmc_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dev_err(dev, "Cannot get the clk dmc_clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return PTR_ERR(dmc_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) ret = clk_prepare_enable(dmc_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dev_err(dev, "failed to prepare/enable dmc_clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) pclk_phy = devm_clk_get(dev, "pclk_phy0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (IS_ERR(pclk_phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) dev_err(dev, "Cannot get the clk pclk_phy0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return PTR_ERR(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ret = clk_prepare_enable(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) dev_err(dev, "failed to prepare/enable pclk_phy0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) pclk_upctl = devm_clk_get(dev, "pclk_upctl0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (IS_ERR(pclk_upctl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dev_err(dev, "Cannot get the clk pclk_upctl0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return PTR_ERR(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ret = clk_prepare_enable(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		dev_err(dev, "failed to prepare/enable pclk_upctl0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) pclk_phy = devm_clk_get(dev, "pclk_phy1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (IS_ERR(pclk_phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) dev_err(dev, "Cannot get the clk pclk_phy1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return PTR_ERR(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) ret = clk_prepare_enable(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) dev_err(dev, "failed to prepare/enable pclk_phy1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) pclk_upctl = devm_clk_get(dev, "pclk_upctl1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (IS_ERR(pclk_upctl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) dev_err(dev, "Cannot get the clk pclk_upctl1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return PTR_ERR(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) ret = clk_prepare_enable(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) dev_err(dev, "failed to prepare/enable pclk_upctl1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct rk3288_ddr_dts_config_timing),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 4096) + 1, SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) of_get_rk3288_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) ddr_psci_param->hz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static __maybe_unused int rk3328_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) res = sip_smc_dram(0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (res.a0 || (res.a1 < 0x101)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			"trusted firmware needs to be updated or is invalid!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	 * The first 4KB is used for the interface parameters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	 * the following 4KB * N holds the dts parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) size = sizeof(struct rk3328_ddr_dts_config_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) of_get_rk3328_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static __maybe_unused int rk3368_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct rk3368_dram_timing *dram_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct clk *pclk_phy, *pclk_upctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) u32 dram_spd_bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) u32 addr_mcu_el3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) u32 dclk_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) u32 lcdc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) pclk_phy = devm_clk_get(dev, "pclk_phy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (IS_ERR(pclk_phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dev_err(dev, "Cannot get the clk pclk_phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return PTR_ERR(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ret = clk_prepare_enable(pclk_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) dev_err(dev, "failed to prepare/enable pclk_phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) pclk_upctl = devm_clk_get(dev, "pclk_upctl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (IS_ERR(pclk_upctl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dev_err(dev, "Cannot get the clk pclk_upctl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return PTR_ERR(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) ret = clk_prepare_enable(pclk_upctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) dev_err(dev, "failed to prepare/enable pclk_upctl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	 * Get the dram timing from the DT and pass it to the ARM Trusted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	 * Firmware; the dram driver in the trusted firmware uses these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	 * timings to do the dram initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) dram_timing = of_get_rk3368_timings(dev, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (dram_timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dram_spd_bin = dram_timing->dram_spd_bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (scpi_ddr_send_timing((u32 *)dram_timing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) sizeof(struct rk3368_dram_timing)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) dev_err(dev, "send ddr timing timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dev_err(dev, "get ddr timing from dts error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) dram_spd_bin = DDR3_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
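	/* Register the DCF FIQ with EL3 and get back addr_mcu_el3 for scpi_ddr_init(). */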
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) res = sip_smc_mcu_el3fiq(FIQ_INIT_HANDLER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) FIQ_NUM_FOR_DCF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) FIQ_CPU_TGT_BOOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	if (res.a0 || (res.a1 == 0) || (res.a1 > 0x80000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		dev_err(dev, "Trust version error, please check the trust version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) addr_mcu_el3 = res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (of_property_read_u32(np, "vop-dclk-mode", &dclk_mode) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) scpi_ddr_dclk_mode(dclk_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) dmcfreq->set_rate_params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (!dmcfreq->set_rate_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) lcdc_type = rk_drm_get_lcdc_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (scpi_ddr_init(dram_spd_bin, 0, lcdc_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) addr_mcu_el3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) dev_err(dev, "ddr init error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		dev_dbg(dev, "%s out\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) dmcfreq->set_auto_self_refresh = scpi_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static int rk3399_set_msch_readlatency(unsigned int readlatency)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, readlatency, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ROCKCHIP_SIP_CONFIG_DRAM_SET_MSCH_RL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 0, 0, 0, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return res.a0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static __maybe_unused int rk3399_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct rk3399_dram_timing *dram_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int index, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) u32 *timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	 * Get the dram timing from the DT and pass it to the ARM Trusted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	 * Firmware; the dram driver in the trusted firmware uses these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	 * timings to do the dram initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) dram_timing = of_get_rk3399_timings(dev, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (dram_timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) timing = (u32 *)dram_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) size = sizeof(struct rk3399_dram_timing) / 4;
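		/* Push the timing structure to ATF one 32-bit word at a time. */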
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) for (index = 0; index < size; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 0, 0, 0, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) dev_err(dev, "Failed to set dram param: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) dmcfreq->set_rate_params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (!dmcfreq->set_rate_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) ROCKCHIP_SIP_CONFIG_DRAM_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 0, 0, 0, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) dmcfreq->info.set_msch_readlatency = rk3399_set_msch_readlatency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static __maybe_unused int rk3568_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) res = sip_smc_dram(0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (res.a0 || res.a1 < 0x101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		dev_err(&pdev->dev, "trusted firmware needs to be updated to V1.01 or above\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	 * The first 4KB is used for the interface parameters and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	 * following 4KB for the dts parameters, so request 4KB * 2 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	 * share memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /* Clear ddr_psci_param, size is 4KB * 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) memset_io(ddr_psci_param, 0x0, 4096 * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* start mcu with sip_smc_dram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) wait_ctrl.dcf_en = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
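	/*
	 * Set up the wait queue and the "complete" IRQ that signals when a
	 * DCF frequency change has finished; the IRQ is requested here but
	 * left disabled.
	 */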
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) init_waitqueue_head(&wait_ctrl.wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) wait_ctrl.wait_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) wait_ctrl.wait_time_out_ms = 17 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) complt_irq = platform_get_irq_byname(pdev, "complete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (complt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) wait_ctrl.complt_irq = complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 0, dev_name(&pdev->dev), &wait_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) dev_err(&pdev->dev, "cannot request complt_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) disable_irq(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) ret = rockchip_get_freq_info(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) dev_err(&pdev->dev, "cannot get frequency info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) dmcfreq->is_set_rate_direct = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static __maybe_unused int rk3588_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) unsigned long opp_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) dev_err(&pdev->dev, "trusted firmware unsupported, please update.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	 * The first 4KB is used for the interface parameters and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	 * following 4KB for the dts parameters, so request 4KB * 2 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	 * share memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Clear ddr_psci_param, size is 4KB * 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) memset_io(ddr_psci_param, 0x0, 4096 * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* start mcu with sip_smc_dram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) wait_ctrl.dcf_en = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) init_waitqueue_head(&wait_ctrl.wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) wait_ctrl.wait_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) wait_ctrl.wait_time_out_ms = 17 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) complt_irq = platform_get_irq_byname(pdev, "complete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (complt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) return complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) wait_ctrl.complt_irq = complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 0, dev_name(&pdev->dev), &wait_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) dev_err(&pdev->dev, "cannot request complt_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) disable_irq(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) ret = rockchip_dmcfreq_adjust_opp_table(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) dev_err(&pdev->dev, "cannot get frequency info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) dmcfreq->is_set_rate_direct = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	/* Configure dmcfreq->sleep_volt for deep sleep. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) opp_rate = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) opp = devfreq_recommended_opp(&pdev->dev, &opp_rate, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (IS_ERR(opp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) dev_err(&pdev->dev, "Failed to find opp for %lu Hz\n", opp_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) return PTR_ERR(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) dmcfreq->sleep_volt = opp->supplies[0].u_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (dmcfreq->regulator_count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) dmcfreq->sleep_mem_volt = opp->supplies[1].u_volt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) dev_pm_opp_put(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) static __maybe_unused int rv1126_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) res = sip_smc_dram(0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (res.a0 || res.a1 < 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			"trusted firmware needs to be updated or is invalid!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	 * The first 4KB is used for the interface parameters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	 * the following 4KB * N holds the dts parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) size = sizeof(struct rk1808_ddr_dts_config_timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 4096) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) SHARE_PAGE_TYPE_DDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (res.a0 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) dev_err(&pdev->dev, "no ATF memory for init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ddr_psci_param = (struct share_params *)res.a1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) of_get_rv1126_timings(&pdev->dev, pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) (uint32_t *)ddr_psci_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	/* Enable starting the DCF from the kernel once the DCF is ready. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) node = of_parse_phandle(pdev->dev.of_node, "dcf", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (IS_ERR(wait_ctrl.regmap_dcf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return PTR_ERR(wait_ctrl.regmap_dcf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) wait_ctrl.dcf_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) init_waitqueue_head(&wait_ctrl.wait_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) wait_ctrl.wait_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) wait_ctrl.wait_time_out_ms = 17 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) complt_irq = platform_get_irq_byname(pdev, "complete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (complt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) wait_ctrl.complt_irq = complt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 0, dev_name(&pdev->dev), &wait_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) dev_err(&pdev->dev, "cannot request complt_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) disable_irq(complt_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (of_property_read_u32(pdev->dev.of_node, "update_drv_odt_cfg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) &ddr_psci_param->update_drv_odt_cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) ddr_psci_param->update_drv_odt_cfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (of_property_read_u32(pdev->dev.of_node, "update_deskew_cfg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) &ddr_psci_param->update_deskew_cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) ddr_psci_param->update_deskew_cfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) dmcfreq->set_rate_params = ddr_psci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ROCKCHIP_SIP_CONFIG_DRAM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
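/*
 * Per-SoC init hooks, selected by the DT compatible string; a NULL .data
 * entry provides no SoC-specific init hook.
 */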
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) static const struct of_device_id rockchip_dmcfreq_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) #if IS_ENABLED(CONFIG_CPU_PX30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) { .compatible = "rockchip,px30-dmc", .data = px30_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) #if IS_ENABLED(CONFIG_CPU_RK1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) { .compatible = "rockchip,rk1808-dmc", .data = rk1808_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) #if IS_ENABLED(CONFIG_CPU_RK312X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) { .compatible = "rockchip,rk3128-dmc", .data = rk3128_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) #if IS_ENABLED(CONFIG_CPU_RK322X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) { .compatible = "rockchip,rk3228-dmc", .data = rk3228_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) #if IS_ENABLED(CONFIG_CPU_RK3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) { .compatible = "rockchip,rk3288-dmc", .data = rk3288_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) #if IS_ENABLED(CONFIG_CPU_RK3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) { .compatible = "rockchip,rk3308-dmc", .data = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) #if IS_ENABLED(CONFIG_CPU_RK3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) { .compatible = "rockchip,rk3328-dmc", .data = rk3328_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) #if IS_ENABLED(CONFIG_CPU_RK3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) { .compatible = "rockchip,rk3368-dmc", .data = rk3368_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) #if IS_ENABLED(CONFIG_CPU_RK3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) { .compatible = "rockchip,rk3399-dmc", .data = rk3399_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) #if IS_ENABLED(CONFIG_CPU_RK3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) { .compatible = "rockchip,rk3568-dmc", .data = rk3568_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) #if IS_ENABLED(CONFIG_CPU_RK3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) { .compatible = "rockchip,rk3588-dmc", .data = rk3588_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) #if IS_ENABLED(CONFIG_CPU_RV1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) { .compatible = "rockchip,rv1126-dmc", .data = rv1126_dmc_init },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) MODULE_DEVICE_TABLE(of, rockchip_dmcfreq_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
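/*
 * Parse a DT property laid out as <min max freq> triplets into a
 * freq_map_table; the frequency cell is scaled by 1000 and the table is
 * terminated with a DMCFREQ_TABLE_END entry.
 */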
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static int rockchip_get_freq_map_talbe(struct device_node *np, char *porp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) struct freq_map_table **table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct freq_map_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) const struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) unsigned int temp_freq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) int count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) prop = of_find_property(np, porp_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (!prop->value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) count = of_property_count_u32_elems(np, porp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (count % 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	tbl = kcalloc(count / 3 + 1, sizeof(*tbl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) for (i = 0; i < count / 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) of_property_read_u32_index(np, porp_name, 3 * i, &tbl[i].min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) of_property_read_u32_index(np, porp_name, 3 * i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) &tbl[i].max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) of_property_read_u32_index(np, porp_name, 3 * i + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) &temp_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) tbl[i].freq = temp_freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) tbl[i].min = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) tbl[i].max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) tbl[i].freq = DMCFREQ_TABLE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) *table = tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
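/*
 * Parse a DT property laid out as <pn rl> pairs into a rl_map_table
 * terminated with a DMCFREQ_TABLE_END entry.
 */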
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) static int rockchip_get_rl_map_talbe(struct device_node *np, char *porp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) struct rl_map_table **table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct rl_map_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) const struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) int count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) prop = of_find_property(np, porp_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (!prop->value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) count = of_property_count_u32_elems(np, porp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (count % 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	tbl = kcalloc(count / 2 + 1, sizeof(*tbl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (!tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) for (i = 0; i < count / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) of_property_read_u32_index(np, porp_name, 2 * i, &tbl[i].pn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) of_property_read_u32_index(np, porp_name, 2 * i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) &tbl[i].rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) tbl[i].pn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) tbl[i].rl = DMCFREQ_TABLE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) *table = tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
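/*
 * Parse <status freq> pairs from the DT and record the matching rate
 * (freq * 1000) for each supported system status.
 */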
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) static int rockchip_get_system_status_rate(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) char *porp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) const struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) unsigned int status = 0, freq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) unsigned long temp_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) int count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) prop = of_find_property(np, porp_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (!prop->value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) count = of_property_count_u32_elems(np, porp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (count % 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) for (i = 0; i < count / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) of_property_read_u32_index(np, porp_name, 2 * i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) of_property_read_u32_index(np, porp_name, 2 * i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) &freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) case SYS_STATUS_NORMAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) dmcfreq->normal_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) case SYS_STATUS_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) dmcfreq->suspend_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) case SYS_STATUS_VIDEO_1080P:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) dmcfreq->video_1080p_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) case SYS_STATUS_VIDEO_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) dmcfreq->video_4k_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) case SYS_STATUS_VIDEO_4K_10B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) dmcfreq->video_4k_10b_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) case SYS_STATUS_VIDEO_SVEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) dmcfreq->video_svep_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) case SYS_STATUS_PERFORMANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) dmcfreq->performance_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) case SYS_STATUS_HDMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) dmcfreq->hdmi_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) case SYS_STATUS_HDMIRX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) dmcfreq->hdmirx_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) case SYS_STATUS_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) dmcfreq->idle_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) case SYS_STATUS_REBOOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) dmcfreq->reboot_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) case SYS_STATUS_BOOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) dmcfreq->boost_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) case SYS_STATUS_ISP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) case SYS_STATUS_CIF0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) case SYS_STATUS_CIF1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) case SYS_STATUS_DUALVIEW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) temp_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (dmcfreq->fixed_rate < temp_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) dmcfreq->fixed_rate = temp_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) case SYS_STATUS_LOW_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) dmcfreq->low_power_rate = freq * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
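/* Translate a DMC_FREQ_LEVEL_* value into the corresponding rate. */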
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static unsigned long rockchip_freq_level_2_rate(struct rockchip_dmcfreq *dmcfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) unsigned int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) unsigned long rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) switch (level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) case DMC_FREQ_LEVEL_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) rate = dmcfreq->rate_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) case DMC_FREQ_LEVEL_MID_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) rate = dmcfreq->rate_mid_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) case DMC_FREQ_LEVEL_MID_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rate = dmcfreq->rate_mid_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) case DMC_FREQ_LEVEL_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) rate = dmcfreq->rate_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static int rockchip_get_system_status_level(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) char *porp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) const struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) unsigned int status = 0, level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) unsigned long temp_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) int count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) prop = of_find_property(np, porp_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (!prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (!prop->value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) count = of_property_count_u32_elems(np, porp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (count % 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
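	/*
	 * Map the frequencies reported by the firmware onto the four coarse
	 * levels (low, mid-low, mid-high, high) depending on how many
	 * frequencies are available.
	 */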
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (dmcfreq->freq_count == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) dmcfreq->rate_high = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) } else if (dmcfreq->freq_count == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) dmcfreq->rate_high = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) } else if (dmcfreq->freq_count == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) dmcfreq->rate_high = dmcfreq->freq_info_rate[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) } else if (dmcfreq->freq_count == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) dmcfreq->rate_high = dmcfreq->freq_info_rate[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) } else if (dmcfreq->freq_count == 5 || dmcfreq->freq_count == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) dmcfreq->rate_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) dmcfreq->auto_min_rate = dmcfreq->rate_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) for (i = 0; i < count / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) of_property_read_u32_index(np, porp_name, 2 * i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) of_property_read_u32_index(np, porp_name, 2 * i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) &level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) case SYS_STATUS_NORMAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) dmcfreq->normal_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) dev_info(dmcfreq->dev, "normal_rate = %ld\n", dmcfreq->normal_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) case SYS_STATUS_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) dmcfreq->suspend_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) dev_info(dmcfreq->dev, "suspend_rate = %ld\n", dmcfreq->suspend_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) case SYS_STATUS_VIDEO_1080P:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) dmcfreq->video_1080p_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) dev_info(dmcfreq->dev, "video_1080p_rate = %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) dmcfreq->video_1080p_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) case SYS_STATUS_VIDEO_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) dmcfreq->video_4k_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) dev_info(dmcfreq->dev, "video_4k_rate = %ld\n", dmcfreq->video_4k_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) case SYS_STATUS_VIDEO_4K_10B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) dmcfreq->video_4k_10b_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) dev_info(dmcfreq->dev, "video_4k_10b_rate = %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) dmcfreq->video_4k_10b_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) case SYS_STATUS_VIDEO_SVEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) dmcfreq->video_svep_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) dev_info(dmcfreq->dev, "video_svep_rate = %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) dmcfreq->video_svep_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) case SYS_STATUS_PERFORMANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) dmcfreq->performance_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) dev_info(dmcfreq->dev, "performance_rate = %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) dmcfreq->performance_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) case SYS_STATUS_HDMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) dmcfreq->hdmi_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) dev_info(dmcfreq->dev, "hdmi_rate = %ld\n", dmcfreq->hdmi_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) case SYS_STATUS_HDMIRX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) dmcfreq->hdmirx_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) dev_info(dmcfreq->dev, "hdmirx_rate = %ld\n", dmcfreq->hdmirx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) case SYS_STATUS_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) dmcfreq->idle_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) dev_info(dmcfreq->dev, "idle_rate = %ld\n", dmcfreq->idle_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) case SYS_STATUS_REBOOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) dmcfreq->reboot_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) dev_info(dmcfreq->dev, "reboot_rate = %ld\n", dmcfreq->reboot_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) case SYS_STATUS_BOOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) dmcfreq->boost_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) dev_info(dmcfreq->dev, "boost_rate = %ld\n", dmcfreq->boost_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) case SYS_STATUS_ISP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) case SYS_STATUS_CIF0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) case SYS_STATUS_CIF1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) case SYS_STATUS_DUALVIEW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) temp_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (dmcfreq->fixed_rate < temp_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) dmcfreq->fixed_rate = temp_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) dev_info(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) "fixed_rate(isp|cif0|cif1|dualview) = %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) dmcfreq->fixed_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) case SYS_STATUS_LOW_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) dmcfreq->low_power_rate = rockchip_freq_level_2_rate(dmcfreq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) dev_info(dmcfreq->dev, "low_power_rate = %ld\n", dmcfreq->low_power_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
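/* Ask devfreq to re-evaluate its target frequency, under devfreq->lock. */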
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static void rockchip_dmcfreq_update_target(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) struct devfreq *devfreq = dmcfreq->info.devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) mutex_lock(&devfreq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) update_devfreq(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) mutex_unlock(&devfreq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
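/*
 * System status notifier: derive a target rate from the currently active
 * status bits.  Fixed-rate statuses (ISP/CIF/dualview), reboot, suspend and
 * low-power short-circuit the decision; otherwise the highest rate among the
 * active performance/HDMI/video statuses wins.  The result is cached in
 * status_rate, auto self-refresh is toggled if needed, and the governor is
 * kicked via rockchip_dmcfreq_update_target().
 */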
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) static int rockchip_dmcfreq_system_status_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) unsigned long status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) struct rockchip_dmcfreq *dmcfreq = system_status_to_dmcfreq(nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) unsigned long target_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) unsigned int refresh = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) bool is_fixed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (dmcfreq->fixed_rate && (is_dualview(status) || is_isp(status))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (dmcfreq->is_fixed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) is_fixed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) target_rate = dmcfreq->fixed_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (dmcfreq->reboot_rate && (status & SYS_STATUS_REBOOT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) if (dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) devfreq_monitor_stop(dmcfreq->info.devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) target_rate = dmcfreq->reboot_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (dmcfreq->suspend_rate && (status & SYS_STATUS_SUSPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) target_rate = dmcfreq->suspend_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) refresh = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (dmcfreq->low_power_rate && (status & SYS_STATUS_LOW_POWER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) target_rate = dmcfreq->low_power_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (dmcfreq->performance_rate && (status & SYS_STATUS_PERFORMANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (dmcfreq->performance_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) target_rate = dmcfreq->performance_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (dmcfreq->hdmi_rate && (status & SYS_STATUS_HDMI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (dmcfreq->hdmi_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) target_rate = dmcfreq->hdmi_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (dmcfreq->hdmirx_rate && (status & SYS_STATUS_HDMIRX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (dmcfreq->hdmirx_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) target_rate = dmcfreq->hdmirx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) if (dmcfreq->video_4k_rate && (status & SYS_STATUS_VIDEO_4K)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) if (dmcfreq->video_4k_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) target_rate = dmcfreq->video_4k_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (dmcfreq->video_4k_10b_rate && (status & SYS_STATUS_VIDEO_4K_10B)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (dmcfreq->video_4k_10b_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) target_rate = dmcfreq->video_4k_10b_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (dmcfreq->video_1080p_rate && (status & SYS_STATUS_VIDEO_1080P)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (dmcfreq->video_1080p_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) target_rate = dmcfreq->video_1080p_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (dmcfreq->video_svep_rate && (status & SYS_STATUS_VIDEO_SVEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (dmcfreq->video_svep_rate > target_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) target_rate = dmcfreq->video_svep_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) dev_dbg(dmcfreq->dev, "status=0x%x\n", (unsigned int)status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) dmcfreq->is_fixed = is_fixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) dmcfreq->status_rate = target_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (dmcfreq->refresh != refresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) if (dmcfreq->set_auto_self_refresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) dmcfreq->set_auto_self_refresh(refresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) dmcfreq->refresh = refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) rockchip_dmcfreq_update_target(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
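/* On panic, dump the current DMC opp/voltage state for post-mortem debug. */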
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) static int rockchip_dmcfreq_panic_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) unsigned long v, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) struct rockchip_dmcfreq *dmcfreq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) container_of(nb, struct rockchip_dmcfreq, panic_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) rockchip_opp_dump_cur_state(dmcfreq->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) static ssize_t rockchip_dmcfreq_status_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) unsigned int status = rockchip_get_system_status();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return sprintf(buf, "0x%x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static ssize_t rockchip_dmcfreq_status_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) rockchip_update_system_status(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) static DEVICE_ATTR(system_status, 0644, rockchip_dmcfreq_status_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) rockchip_dmcfreq_status_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static ssize_t upthreshold_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) return sprintf(buf, "%d\n", data->upthreshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) static ssize_t upthreshold_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (kstrtouint(buf, 10, &value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) data->upthreshold = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) static DEVICE_ATTR_RW(upthreshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) static ssize_t downdifferential_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return sprintf(buf, "%d\n", data->downdifferential);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) static ssize_t downdifferential_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (kstrtouint(buf, 10, &value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) data->downdifferential = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static DEVICE_ATTR_RW(downdifferential);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
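/*
 * Translate the latest CPU NoC probe bandwidth sample into a minimum DMC
 * rate using the "cpu-bw-dmc-freq" table: the highest entry whose "min"
 * threshold the measured bandwidth reaches is selected.
 */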
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) static unsigned long get_nocp_req_rate(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) unsigned long target = 0, cpu_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (!dmcfreq->cpu_bw_tbl || dmcfreq->nocp_cpu_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) cpu_bw = dmcfreq->nocp_bw[dmcfreq->nocp_cpu_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) for (i = 0; dmcfreq->cpu_bw_tbl[i].freq != CPUFREQ_TABLE_END; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if (cpu_bw >= dmcfreq->cpu_bw_tbl[i].min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) target = dmcfreq->cpu_bw_tbl[i].freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) return target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
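/*
 * dmc_ondemand governor: build a floor frequency from the system-status,
 * NoC-bandwidth and VOP requests (plus the boost rate while a touch-boost
 * pulse is active), then run the usual ondemand busy/total load calculation
 * on top of it.  When automatic scaling is off or a fixed rate is forced,
 * only the status/normal rate is returned and the load stats are reset.
 */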
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) static int devfreq_dmc_ondemand_func(struct devfreq *df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) unsigned long *freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) struct devfreq_dev_status *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) unsigned long long a, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(df->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) unsigned int upthreshold = data->upthreshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) unsigned int downdifferential = data->downdifferential;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) unsigned long target_freq = 0, nocp_req_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (dmcfreq->info.auto_freq_en && !dmcfreq->is_fixed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (dmcfreq->status_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) target_freq = dmcfreq->status_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) else if (dmcfreq->auto_min_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) target_freq = dmcfreq->auto_min_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) nocp_req_rate = get_nocp_req_rate(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) target_freq = max3(target_freq, nocp_req_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) dmcfreq->info.vop_req_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) now = ktime_to_us(ktime_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) if (now < dmcfreq->touchboostpulse_endtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) target_freq = max(target_freq, dmcfreq->boost_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (dmcfreq->status_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) target_freq = dmcfreq->status_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) else if (dmcfreq->normal_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) target_freq = dmcfreq->normal_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if (target_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) *freq = target_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) if (dmcfreq->info.auto_freq_en && !devfreq_update_stats(df))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) goto reset_last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) if (!upthreshold || !downdifferential)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) goto reset_last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (upthreshold > 100 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) upthreshold < downdifferential)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) goto reset_last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) err = devfreq_update_stats(df);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) goto reset_last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) stat = &df->last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) /* Assume MAX if it is going to be divided by zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (stat->total_time == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) *freq = DEVFREQ_MAX_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) /* Prevent overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) stat->busy_time >>= 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) stat->total_time >>= 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) /* Set MAX if it's busy enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) if (stat->busy_time * 100 >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) stat->total_time * upthreshold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) *freq = DEVFREQ_MAX_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /* Set MAX if we do not know the initial frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (stat->current_frequency == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) *freq = DEVFREQ_MAX_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) /* Keep the current frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (stat->busy_time * 100 >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) stat->total_time * (upthreshold - downdifferential)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) *freq = max(target_freq, stat->current_frequency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) /* Set the desired frequency based on the load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) a = stat->busy_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) a *= stat->current_frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) b = div_u64(a, stat->total_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) b *= 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) b = div_u64(b, (upthreshold - downdifferential / 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) *freq = max_t(unsigned long, target_freq, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) reset_last_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) reset_last_status(df);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
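/*
 * Governor event handler: a no-op unless automatic frequency scaling is
 * enabled, in which case events are forwarded to the devfreq load monitor.
 */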
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) static int devfreq_dmc_ondemand_handler(struct devfreq *devfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) unsigned int event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(devfreq->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (!dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) case DEVFREQ_GOV_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) devfreq_monitor_start(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) case DEVFREQ_GOV_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) devfreq_monitor_stop(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) case DEVFREQ_GOV_UPDATE_INTERVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) devfreq_update_interval(devfreq, (unsigned int *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) case DEVFREQ_GOV_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) devfreq_monitor_suspend(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) case DEVFREQ_GOV_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) devfreq_monitor_resume(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) static struct devfreq_governor devfreq_dmc_ondemand = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) .name = "dmc_ondemand",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) .get_target_freq = devfreq_dmc_ondemand_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) .event_handler = devfreq_dmc_ondemand_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) static int rockchip_dmcfreq_enable_event(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (!dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) for (i = 0; i < dmcfreq->edev_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) ret = devfreq_event_enable_edev(dmcfreq->edev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) "failed to enable devfreq-event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) static int rockchip_dmcfreq_disable_event(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (!dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) for (i = 0; i < dmcfreq->edev_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) ret = devfreq_event_disable_edev(dmcfreq->edev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) "failed to disable devfreq-event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static int rockchip_get_edev_id(struct rockchip_dmcfreq *dmcfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) struct devfreq_event_dev *edev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) for (i = 0; i < dmcfreq->edev_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) edev = dmcfreq->edev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (!strcmp(edev->desc->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
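/*
 * Collect the "devfreq-events" counters: count the available phandles, grab
 * each event device (deferring probe if one is not ready yet) and remember
 * the indices of the "dfi" and "nocp-cpu" counters.  Automatic frequency
 * scaling is enabled once at least one counter is present.
 */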
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) static int rockchip_dmcfreq_get_event(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) struct device_node *events_np, *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int i, j, count, available_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) count = devfreq_event_get_edev_count(dev, "devfreq-events");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (count < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) dev_dbg(dev, "failed to get count of devfreq-event dev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) events_np = of_parse_phandle(np, "devfreq-events", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (!events_np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (of_device_is_available(events_np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) available_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) of_node_put(events_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (!available_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) dev_dbg(dev, "failed to get available devfreq-event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) dmcfreq->edev_count = available_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) dmcfreq->edev = devm_kzalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) sizeof(*dmcfreq->edev) * available_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (!dmcfreq->edev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) for (i = 0, j = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) events_np = of_parse_phandle(np, "devfreq-events", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) if (!events_np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) if (of_device_is_available(events_np)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) of_node_put(events_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if (j >= available_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) dev_err(dev, "invalid event conut\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) dmcfreq->edev[j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) if (IS_ERR(dmcfreq->edev[j]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) of_node_put(events_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) dmcfreq->info.auto_freq_en = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) dmcfreq->dfi_id = rockchip_get_edev_id(dmcfreq, "dfi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) dmcfreq->nocp_cpu_id = rockchip_get_edev_id(dmcfreq, "nocp-cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) dmcfreq->nocp_bw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) devm_kzalloc(dev, sizeof(*dmcfreq->nocp_bw) * available_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (!dmcfreq->nocp_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
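/*
 * Acquire the resources needed to change the DMC operating point: register
 * the "center" (and optional "mem") supplies and the custom set_opp helper
 * with the OPP core, get the regulators and the dmc_clk clock, and record
 * the current rate.
 */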
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) static int rockchip_dmcfreq_power_control(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct opp_table *opp_table = NULL, *reg_opp_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) const char * const reg_names[] = {"center", "mem"};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (of_find_property(np, "mem-supply", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) dmcfreq->regulator_count = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) dmcfreq->regulator_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) reg_opp_table = dev_pm_opp_set_regulators(dev, reg_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) dmcfreq->regulator_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (IS_ERR(reg_opp_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) dev_err(dev, "failed to set regulators\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) return PTR_ERR(reg_opp_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) opp_table = dev_pm_opp_register_set_opp_helper(dev, rockchip_dmcfreq_opp_helper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (IS_ERR(opp_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) dev_err(dev, "failed to set opp helper\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) ret = PTR_ERR(opp_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) goto reg_opp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) dmcfreq->vdd_center = devm_regulator_get_optional(dev, "center");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (IS_ERR(dmcfreq->vdd_center)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) dev_err(dev, "Cannot get the regulator \"center\"\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) ret = PTR_ERR(dmcfreq->vdd_center);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) goto opp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (dmcfreq->regulator_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) dmcfreq->mem_reg = devm_regulator_get_optional(dev, "mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (IS_ERR(dmcfreq->mem_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) dev_err(dev, "Cannot get the regulator \"mem\"\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ret = PTR_ERR(dmcfreq->mem_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) goto opp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) dmcfreq->dmc_clk = devm_clk_get(dev, "dmc_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (IS_ERR(dmcfreq->dmc_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) dev_err(dev, "Cannot get the clk dmc_clk. If using SCMI, trusted firmware need update to V1.01 and above.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) ret = PTR_ERR(dmcfreq->dmc_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) goto opp_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) opp_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) if (opp_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) dev_pm_opp_unregister_set_opp_helper(opp_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) reg_opp_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (reg_opp_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) dev_pm_opp_put_regulators(reg_opp_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static int rockchip_dmcfreq_dmc_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) int (*init)(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) struct rockchip_dmcfreq *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) match = of_match_node(rockchip_dmcfreq_of_match, pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) init = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) ret = init(pdev, dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
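/* Parse the optional devicetree tuning properties for the DMC devfreq. */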
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) static void rockchip_dmcfreq_parse_dt(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (!rockchip_get_system_status_rate(np, "system-status-freq", dmcfreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) dmcfreq->system_status_en = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) else if (!rockchip_get_system_status_level(np, "system-status-level", dmcfreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) dmcfreq->system_status_en = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) of_property_read_u32(np, "min-cpu-freq", &dmcfreq->min_cpu_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) of_property_read_u32(np, "upthreshold",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) &dmcfreq->ondemand_data.upthreshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) of_property_read_u32(np, "downdifferential",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) &dmcfreq->ondemand_data.downdifferential);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) if (dmcfreq->info.auto_freq_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) of_property_read_u32(np, "auto-freq-en",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) &dmcfreq->info.auto_freq_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (!dmcfreq->auto_min_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) of_property_read_u32(np, "auto-min-freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) (u32 *)&dmcfreq->auto_min_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) dmcfreq->auto_min_rate *= 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (rockchip_get_freq_map_talbe(np, "cpu-bw-dmc-freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) &dmcfreq->cpu_bw_tbl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) dev_dbg(dev, "failed to get cpu bandwidth to dmc rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) if (rockchip_get_freq_map_talbe(np, "vop-frame-bw-dmc-freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) &dmcfreq->info.vop_frame_bw_tbl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) dev_dbg(dev, "failed to get vop frame bandwidth to dmc rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (rockchip_get_freq_map_talbe(np, "vop-bw-dmc-freq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) &dmcfreq->info.vop_bw_tbl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) dev_err(dev, "failed to get vop bandwidth to dmc rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (rockchip_get_rl_map_talbe(np, "vop-pn-msch-readlatency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) &dmcfreq->info.vop_pn_rl_tbl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) dev_err(dev, "failed to get vop pn to msch rl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) of_property_read_u32(np, "touchboost_duration",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) (u32 *)&dmcfreq->touchboostpulse_duration_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (dmcfreq->touchboostpulse_duration_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) dmcfreq->touchboostpulse_duration_val *= USEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dmcfreq->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
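/*
 * No frequency scaling here: only align the "center" supply with the OPP
 * that matches the current DMC rate.
 */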
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static int rockchip_dmcfreq_set_volt_only(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) unsigned long opp_volt, opp_rate = dmcfreq->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) opp = devfreq_recommended_opp(dev, &opp_rate, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (IS_ERR(opp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) return PTR_ERR(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) opp_volt = dev_pm_opp_get_voltage(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) dev_pm_opp_put(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) ret = regulator_set_voltage(dmcfreq->vdd_center, opp_volt, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) dev_err(dev, "Cannot set voltage %lu uV\n", opp_volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
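/*
 * Register the devfreq device with the dmc_ondemand governor, hook the OPP
 * notifier and seed the initial status with the current rate.
 */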
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) static int rockchip_dmcfreq_add_devfreq(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) struct devfreq_dev_profile *devp = &rockchip_devfreq_dmc_profile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) struct device *dev = dmcfreq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) struct dev_pm_opp *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) struct devfreq *devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) unsigned long opp_rate = dmcfreq->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) opp = devfreq_recommended_opp(dev, &opp_rate, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (IS_ERR(opp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) return PTR_ERR(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) dev_pm_opp_put(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) devp->initial_freq = dmcfreq->rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) devfreq = devm_devfreq_add_device(dev, devp, "dmc_ondemand",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) &dmcfreq->ondemand_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) if (IS_ERR(devfreq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) dev_err(dev, "failed to add devfreq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return PTR_ERR(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) devm_devfreq_register_opp_notifier(dev, devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) devfreq->last_status.current_frequency = opp_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) reset_last_status(devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) dmcfreq->info.devfreq = devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
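/*
 * Hook the driver into the VOP, system-status, panic and system-monitor
 * notification paths.
 */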
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) static void rockchip_dmcfreq_register_notifier(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) if (vop_register_dmc())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) dev_err(dmcfreq->dev, "fail to register notify to vop.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) dmcfreq->status_nb.notifier_call =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) rockchip_dmcfreq_system_status_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) ret = rockchip_register_system_status_notifier(&dmcfreq->status_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) dev_err(dmcfreq->dev, "failed to register system_status nb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) dmcfreq->panic_nb.notifier_call = rockchip_dmcfreq_panic_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) ret = atomic_notifier_chain_register(&panic_notifier_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) &dmcfreq->panic_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) dev_err(dmcfreq->dev, "failed to register panic nb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) dmc_mdevp.data = dmcfreq->info.devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) dmcfreq->mdev_info = rockchip_system_monitor_register(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) &dmc_mdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if (IS_ERR(dmcfreq->mdev_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) dev_dbg(dmcfreq->dev, "without without system monitor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) dmcfreq->mdev_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) static void rockchip_dmcfreq_add_interface(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) struct devfreq *devfreq = dmcfreq->info.devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_upthreshold.attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) "failed to register upthreshold sysfs file\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (sysfs_create_file(&devfreq->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) &dev_attr_downdifferential.attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) "failed to register downdifferential sysfs file\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (!rockchip_add_system_status_interface(&devfreq->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (sysfs_create_file(&devfreq->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) &dev_attr_system_status.attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) "failed to register system_status sysfs file\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static void rockchip_dmcfreq_boost_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) struct rockchip_dmcfreq *dmcfreq = boost_to_dmcfreq(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) rockchip_dmcfreq_update_target(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
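/*
 * Input handler for touch boost: on EV_ABS/EV_KEY events, extend the
 * boost pulse deadline and queue the boost work, unless the previous
 * pulse (plus ~10 ms of slack) already covers this event.
 */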
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) static void rockchip_dmcfreq_input_event(struct input_handle *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) unsigned int code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) struct rockchip_dmcfreq *dmcfreq = handle->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) u64 now, endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (type != EV_ABS && type != EV_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) now = ktime_to_us(ktime_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) endtime = now + dmcfreq->touchboostpulse_duration_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) if (endtime < (dmcfreq->touchboostpulse_endtime + 10 * USEC_PER_MSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) dmcfreq->touchboostpulse_endtime = endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) queue_work(system_freezable_wq, &dmcfreq->boost_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) static int rockchip_dmcfreq_input_connect(struct input_handler *handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) struct input_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) const struct input_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) struct input_handle *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) struct rockchip_dmcfreq *dmcfreq = input_hd_to_dmcfreq(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) handle = kzalloc(sizeof(*handle), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) handle->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) handle->handler = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) handle->name = "dmcfreq";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) handle->private = dmcfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) error = input_register_handle(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) error = input_open_device(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) input_unregister_handle(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) kfree(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) static void rockchip_dmcfreq_input_disconnect(struct input_handle *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) input_close_device(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) input_unregister_handle(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) kfree(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) static const struct input_device_id rockchip_dmcfreq_input_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) INPUT_DEVICE_ID_MATCH_ABSBIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) .evbit = { BIT_MASK(EV_ABS) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) BIT_MASK(ABS_MT_POSITION_X) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) BIT_MASK(ABS_MT_POSITION_Y) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) INPUT_DEVICE_ID_MATCH_ABSBIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) .absbit = { [BIT_WORD(ABS_X)] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) .evbit = { BIT_MASK(EV_KEY) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
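/*
 * Touch boost is only set up when a boost_rate has been configured:
 * register an input handler matching touchscreens and keys so that user
 * input can trigger the boost work.
 */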
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) static void rockchip_dmcfreq_boost_init(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (!dmcfreq->boost_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) INIT_WORK(&dmcfreq->boost_work, rockchip_dmcfreq_boost_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) dmcfreq->input_handler.event = rockchip_dmcfreq_input_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) dmcfreq->input_handler.connect = rockchip_dmcfreq_input_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) dmcfreq->input_handler.disconnect = rockchip_dmcfreq_input_disconnect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) dmcfreq->input_handler.name = "dmcfreq";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) dmcfreq->input_handler.id_table = rockchip_dmcfreq_input_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) if (input_register_handler(&dmcfreq->input_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) dev_err(dmcfreq->dev, "failed to register input handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
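/*
 * Static power model used by the devfreq cooling device:
 *
 *   P_static = static_coefficient * V^3 * f(T)   (with fixed-point shifts)
 *
 * where f(T) = ts[3]*T^3 + ts[2]*T^2 + ts[1]*T + ts[0] and T is the DDR
 * thermal zone temperature in degrees Celsius.  If the thermal zone is
 * missing or the temperature read fails, FALLBACK_STATIC_TEMPERATURE is
 * used instead.
 */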
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) static unsigned long model_static_power(struct devfreq *devfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) unsigned long voltage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct device *dev = devfreq->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) int temperature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) unsigned long temp_squared, temp_cubed, temp_scaling_factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (!IS_ERR_OR_NULL(dmcfreq->ddr_tz) && dmcfreq->ddr_tz->ops->get_temp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) ret = dmcfreq->ddr_tz->ops->get_temp(dmcfreq->ddr_tz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) &temperature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) dev_warn_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) "failed to read temp for ddr thermal zone: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) temperature = FALLBACK_STATIC_TEMPERATURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) temperature = FALLBACK_STATIC_TEMPERATURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * Calculate the temperature scaling factor. To be applied to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) * voltage scaled power.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) temp = temperature / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) temp_squared = temp * temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) temp_cubed = temp_squared * temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) temp_scaling_factor = (dmcfreq->ts[3] * temp_cubed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) + (dmcfreq->ts[2] * temp_squared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) + (dmcfreq->ts[1] * temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) + dmcfreq->ts[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) return (((dmcfreq->static_coefficient * voltage_cubed) >> 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) * temp_scaling_factor) / 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) static struct devfreq_cooling_power ddr_cooling_power_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) .get_static_power = model_static_power,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) .dyn_power_coeff = 120,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
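/*
 * Parse the "ddr_power_model" DT node: the thermal zone name, the
 * static and dynamic power coefficients and the four "ts" temperature
 * scaling coefficients.  Returns -EPROBE_DEFER while the thermal zone
 * is not yet available.
 */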
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) static int ddr_power_model_simple_init(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) struct device_node *power_model_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) const char *tz_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) power_model_node = of_get_child_by_name(dmcfreq->dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) "ddr_power_model");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (!power_model_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) dev_err(dmcfreq->dev, "could not find ddr_power_model node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) if (of_property_read_string(power_model_node, "thermal-zone", &tz_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) dev_err(dmcfreq->dev, "thermal-zone in power_model not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) dmcfreq->ddr_tz = thermal_zone_get_zone_by_name(tz_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) if (IS_ERR(dmcfreq->ddr_tz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) pr_warn_ratelimited("Error getting ddr thermal zone (%ld), not yet ready?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) PTR_ERR(dmcfreq->ddr_tz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) dmcfreq->ddr_tz = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (of_property_read_u32(power_model_node, "static-power-coefficient",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) &dmcfreq->static_coefficient)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) "static-power-coefficient not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) if (of_property_read_u32(power_model_node, "dynamic-power-coefficient",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) &temp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) "dynamic-power-coefficient not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) ddr_cooling_power_data.dyn_power_coeff = (unsigned long)temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) if (of_property_read_u32_array(power_model_node, "ts",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) (u32 *)dmcfreq->ts, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) dev_err(dmcfreq->dev, "ts in power_model not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
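/*
 * Register a devfreq cooling device backed by the simple DDR power
 * model.  If the power model cannot be initialized, no cooling device
 * is registered; failures are only logged.
 */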
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) rockchip_dmcfreq_register_cooling_device(struct rockchip_dmcfreq *dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) ret = ddr_power_model_simple_init(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) dmcfreq->devfreq_cooling =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) of_devfreq_cooling_register_power(dmcfreq->dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) dmcfreq->info.devfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) &ddr_cooling_power_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (IS_ERR(dmcfreq->devfreq_cooling)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) ret = PTR_ERR(dmcfreq->devfreq_cooling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) dev_err(dmcfreq->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) "Failed to register cooling device (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
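/*
 * Probe path: grab the monitoring event device and power supplies, set
 * up the OPP table and the SoC-specific DMC support, then parse the DT.
 * When neither system-status nor automatic frequency scaling is enabled
 * only the voltage is programmed; otherwise the dmc_ondemand governor,
 * the devfreq device, the notifiers, the sysfs interface, touch boost,
 * VOP bandwidth handling and the cooling device are registered.
 */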
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) static int rockchip_dmcfreq_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) struct rockchip_dmcfreq *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) data = devm_kzalloc(dev, sizeof(struct rockchip_dmcfreq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) data->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) data->info.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) mutex_init(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) INIT_LIST_HEAD(&data->video_info_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) ret = rockchip_dmcfreq_get_event(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) ret = rockchip_dmcfreq_power_control(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) ret = rockchip_init_opp_table(dev, NULL, "ddr_leakage", "center");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) ret = rockchip_dmcfreq_dmc_init(pdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) rockchip_dmcfreq_parse_dt(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (!data->system_status_en && !data->info.auto_freq_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) dev_info(dev, "not adding devfreq feature\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) return rockchip_dmcfreq_set_volt_only(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) cpu_latency_qos_add_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) platform_set_drvdata(pdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) ret = devfreq_add_governor(&devfreq_dmc_ondemand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) ret = rockchip_dmcfreq_enable_event(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) ret = rockchip_dmcfreq_add_devfreq(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) rockchip_dmcfreq_disable_event(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) rockchip_dmcfreq_register_notifier(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) rockchip_dmcfreq_add_interface(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) rockchip_dmcfreq_boost_init(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) rockchip_dmcfreq_vop_bandwidth_init(&data->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) rockchip_dmcfreq_register_cooling_device(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) rockchip_set_system_status(SYS_STATUS_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
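/*
 * System suspend: stop the event counters and the devfreq device, then
 * switch to the optional sleep voltages (sleep_volt / sleep_mem_volt)
 * if they differ from the running voltages.
 */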
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) static __maybe_unused int rockchip_dmcfreq_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (!dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) ret = rockchip_dmcfreq_disable_event(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) ret = devfreq_suspend_device(dmcfreq->info.devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) dev_err(dev, "failed to suspend the devfreq devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) /* set voltage to sleep_volt if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) ret = regulator_set_voltage(dmcfreq->vdd_center,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) dmcfreq->sleep_volt, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) dev_err(dev, "Cannot set vdd voltage %lu uV\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) dmcfreq->sleep_volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) if (dmcfreq->sleep_mem_volt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) ret = regulator_set_voltage(dmcfreq->mem_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) dmcfreq->sleep_mem_volt, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) dev_err(dev, "Cannot set mem voltage %lu uV\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) dmcfreq->sleep_mem_volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
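/*
 * System resume: restore the running voltages first, then re-enable the
 * event counters and resume the devfreq device.
 */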
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) static __maybe_unused int rockchip_dmcfreq_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (!dmcfreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) /* restore voltage if it was set to sleep_volt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) ret = regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) dev_err(dev, "Cannot set vdd voltage %lu uV\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) dmcfreq->volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if (dmcfreq->sleep_mem_volt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) ret = regulator_set_voltage(dmcfreq->mem_reg, dmcfreq->mem_volt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) dev_err(dev, "Cannot set mem voltage %lu uV\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) dmcfreq->mem_volt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) ret = rockchip_dmcfreq_enable_event(dmcfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) ret = devfreq_resume_device(dmcfreq->info.devfreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) dev_err(dev, "failed to resume the devfreq devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) static SIMPLE_DEV_PM_OPS(rockchip_dmcfreq_pm, rockchip_dmcfreq_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) rockchip_dmcfreq_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) static struct platform_driver rockchip_dmcfreq_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) .probe = rockchip_dmcfreq_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) .name = "rockchip-dmc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) .pm = &rockchip_dmcfreq_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) .of_match_table = rockchip_dmcfreq_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) module_platform_driver(rockchip_dmcfreq_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) MODULE_DESCRIPTION("Rockchip dmcfreq driver with devfreq framework");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) MODULE_LICENSE("GPL v2");