// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include "common.h"

#define SRC_SCR				0x000
#define SRC_GPR1			0x020
#define BP_SRC_SCR_WARM_RESET_ENABLE	0
#define BP_SRC_SCR_SW_GPU_RST		1
#define BP_SRC_SCR_SW_VPU_RST		2
#define BP_SRC_SCR_SW_IPU1_RST		3
#define BP_SRC_SCR_SW_OPEN_VG_RST	4
#define BP_SRC_SCR_SW_IPU2_RST		12
#define BP_SRC_SCR_CORE1_RST		14
#define BP_SRC_SCR_CORE1_ENABLE		22

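/*
 * Base of the System Reset Controller (SRC) block, mapped at init time.
 * scr_lock serializes read-modify-write accesses to the SRC_SCR register,
 * which is shared between the module reset path and the CPU enable/disable
 * path below.
 */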
static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);

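/*
 * Map of reset-controller line index to the corresponding software reset
 * bit in SRC_SCR.  The index order (GPU, VPU, IPU1, OPEN_VG, IPU2) is the
 * order device tree consumers are expected to use in their "resets"
 * phandle argument.
 */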
static const int sw_reset_bits[5] = {
	BP_SRC_SCR_SW_GPU_RST,
	BP_SRC_SCR_SW_VPU_RST,
	BP_SRC_SCR_SW_IPU1_RST,
	BP_SRC_SCR_SW_OPEN_VG_RST,
	BP_SRC_SCR_SW_IPU2_RST
};

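/*
 * Assert the self-clearing software reset bit for the selected module.
 * The SCR update is done under scr_lock; the subsequent poll runs
 * unlocked and waits up to one second for the hardware to clear the bit
 * again, returning -ETIME if it never does.
 */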
static int imx_src_reset_module(struct reset_controller_dev *rcdev,
				unsigned long sw_reset_idx)
{
	unsigned long timeout;
	unsigned long flags;
	int bit;
	u32 val;

	if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
		return -EINVAL;

	bit = 1 << sw_reset_bits[sw_reset_idx];

	spin_lock_irqsave(&scr_lock, flags);
	val = readl_relaxed(src_base + SRC_SCR);
	val |= bit;
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock_irqrestore(&scr_lock, flags);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (readl(src_base + SRC_SCR) & bit) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

static const struct reset_control_ops imx_src_ops = {
	.reset = imx_src_reset_module,
};

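/*
 * Reset lines are addressed by index, so a consumer node in the device
 * tree would, for example, reference IPU1 as "resets = <&src 2>;" and its
 * driver would then issue reset_control_reset() / device_reset().  (The
 * example assumes the usual &src label and the index order of
 * sw_reset_bits[] above.)
 */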
static struct reset_controller_dev imx_reset_controller = {
	.ops = &imx_src_ops,
	.nr_resets = ARRAY_SIZE(sw_reset_bits),
};

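/*
 * Enable or disable a secondary core (cpu > 0) via its CORE<n>_ENABLE bit.
 * The logical CPU number is translated to the physical (MPIDR) id first,
 * and the core's software reset bit is asserted in the same write, so on
 * enable the core comes out of reset at the entry point programmed into
 * its SRC_GPR pair by imx_set_cpu_jump().
 */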
void imx_enable_cpu(int cpu, bool enable)
{
	u32 mask, val;

	cpu = cpu_logical_map(cpu);
	mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val = enable ? val | mask : val & ~mask;
	val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}

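/*
 * Each core owns a pair of general purpose registers starting at SRC_GPR1:
 * the first word (offset cpu * 8) holds the physical entry point the core
 * branches to when it comes out of reset, the second word (offset
 * cpu * 8 + 4) is a scratch argument read and written by the helpers below.
 */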
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(__pa_symbol(jump_addr),
		       src_base + SRC_GPR1 + cpu * 8);
}

u32 imx_get_cpu_arg(int cpu)
{
	cpu = cpu_logical_map(cpu);
	return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
}

void imx_set_cpu_arg(int cpu, u32 arg)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
}

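/*
 * Early machine init: map the SRC block from the "fsl,imx51-src"
 * compatible node, register the module reset controller when
 * CONFIG_RESET_CONTROLLER is enabled, and disable warm resets so that
 * every reset source produces a cold reset.
 */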
void __init imx_src_init(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!np)
		return;
	src_base = of_iomap(np, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = np;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}