// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.

#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>

#include "mtk_common.h"
#include "remoteproc_internal.h"

#define MAX_CODE_SIZE 0x500000
#define SCP_FW_END 0x7C000

/**
 * scp_get() - get a reference to SCP.
 *
 * @pdev: the platform device of the module requesting the SCP platform
 *        device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to the SCP.
 **/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *scp_node;
	struct platform_device *scp_pdev;

	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
	if (!scp_node) {
		dev_err(dev, "can't get SCP node\n");
		return NULL;
	}

	scp_pdev = of_find_device_by_node(scp_node);
	of_node_put(scp_node);

	if (WARN_ON(!scp_pdev)) {
		dev_err(dev, "SCP pdev failed\n");
		return NULL;
	}

	return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);

/**
 * scp_put() - release the reference to the SCP obtained by scp_get()
 *
 * @scp: mtk_scp structure from scp_get().
 **/
void scp_put(struct mtk_scp *scp)
{
	put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
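
/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * driver): a driver whose device node carries a "mediatek,scp" phandle
 * could obtain and release the SCP handle roughly like this. The
 * client_pdev name is an assumption made for the example only.
 *
 *	struct mtk_scp *scp;
 *
 *	scp = scp_get(client_pdev);
 *	if (!scp)
 *		return -EPROBE_DEFER;
 *	...
 *	scp_put(scp);
 */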

static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
	rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
}

static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_scp *scp = (struct mtk_scp *)priv;
	struct scp_run *run = (struct scp_run *)data;

	scp->run.signaled = run->signaled;
	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
	scp->run.dec_capability = run->dec_capability;
	scp->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&scp->run.wq);
}

static void scp_ipi_handler(struct mtk_scp *scp)
{
	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
	scp_ipi_handler_t handler;
	u32 id = readl(&rcv_obj->id);
	u32 len = readl(&rcv_obj->len);

	if (len > SCP_SHARE_BUFFER_SIZE) {
		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
			SCP_SHARE_BUFFER_SIZE);
		return;
	}
	if (id >= SCP_IPI_MAX) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		return;
	}

	scp_ipi_lock(scp, id);
	handler = ipi_desc[id].handler;
	if (!handler) {
		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
		scp_ipi_unlock(scp, id);
		return;
	}

	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
	handler(tmp_data, len, ipi_desc[id].priv);
	scp_ipi_unlock(scp, id);

	scp->ipi_id_ack[id] = true;
	wake_up(&scp->ack_wq);
}

static int scp_ipi_init(struct mtk_scp *scp)
{
	size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
	size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);

	/* shared buffer initialization */
	scp->recv_buf =
		(struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
	scp->send_buf =
		(struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));

	return 0;
}
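
/*
 * Illustrative layout note, derived only from the offsets computed in
 * scp_ipi_init() above: the two IPC shared buffers sit at the top of the
 * SCP SRAM, immediately below SCP_FW_END.
 *
 *	recv_buf = sram_base + SCP_FW_END - 2 * sizeof(struct mtk_share_obj)
 *	send_buf = sram_base + SCP_FW_END -     sizeof(struct mtk_share_obj)
 */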

static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->reg_base + MT8183_SW_RSTN);
	val &= ~MT8183_SW_RSTN_BIT;
	writel(val, scp->reg_base + MT8183_SW_RSTN);
}

static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->reg_base + MT8183_SW_RSTN);
	val |= MT8183_SW_RSTN_BIT;
	writel(val, scp->reg_base + MT8183_SW_RSTN);
}

static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
}

static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}

static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);
	else
		scp_wdt_handler(scp, scp_to_host);

	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
	       scp->reg_base + MT8183_SCP_TO_HOST);
}

static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);

		/*
		 * SCP won't send another interrupt until we clear
		 * MT8192_SCP2APMCU_IPC.
		 */
		writel(MT8192_SCP_IPC_INT_BIT,
		       scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
	} else {
		scp_wdt_handler(scp, scp_to_host);
		writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
	}
}

static irqreturn_t scp_irq_handler(int irq, void *priv)
{
	struct mtk_scp *scp = priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return IRQ_NONE;
	}

	scp->data->scp_irq_handler(scp);

	clk_disable_unprepare(scp->clk);

	return IRQ_HANDLED;
}

static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void __iomem *ptr;

		if (phdr->p_type != PT_LOAD)
			continue;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
			phdr->p_type, da, memsz, filesz);

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > fw->size) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
				offset + filesz, fw->size);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		if (phdr->p_filesz)
			scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
					   filesz);
	}

	return ret;
}

static int mt8183_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);

	/* Turn on the power of SCP's SRAM before using it. */
	writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

static void mt8192_power_on_sram(void __iomem *addr)
{
	int i;

	/* Power on the SRAM gradually: clear the power-down bits one at a time. */
	for (i = 31; i >= 0; i--)
		writel(GENMASK(i, 0), addr);
	writel(0, addr);
}

static void mt8192_power_off_sram(void __iomem *addr)
{
	int i;

	/* Power off the SRAM gradually: set the power-down bits one at a time. */
	writel(0, addr);
	for (i = 0; i < 32; i++)
		writel(GENMASK(i, 0), addr);
}
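
/*
 * For illustration only: powering on one of the SRAM power-down registers
 * via mt8192_power_on_sram() issues this sequence of writes (hex):
 *
 *	0xffffffff, 0x7fffffff, 0x3fffffff, ..., 0x00000003, 0x00000001, 0x0
 *
 * mt8192_power_off_sram() walks the same values in the opposite direction,
 * ending with all power-down bits set.
 */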

static int mt8192_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
	mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
	mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
	mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
	mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);

	return 0;
}

static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	/* Hold SCP in reset while loading FW. */
	scp->data->scp_reset_assert(scp);

	ret = scp->data->scp_before_load(scp);
	if (ret < 0)
		goto leave;

	ret = scp_elf_load_segments(rproc, fw);
leave:
	clk_disable_unprepare(scp->clk);

	return ret;
}

static int scp_start(struct rproc *rproc)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
	struct device *dev = scp->dev;
	struct scp_run *run = &scp->run;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	run->signaled = false;

	scp->data->scp_reset_deassert(scp);

	ret = wait_event_interruptible_timeout(
					run->wq,
					run->signaled,
					msecs_to_jiffies(2000));

	if (ret == 0) {
		dev_err(dev, "wait for SCP initialization timed out!\n");
		ret = -ETIME;
		goto stop;
	}
	if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait for SCP initialization interrupted by a signal!\n");
		goto stop;
	}

	clk_disable_unprepare(scp->clk);
	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);

	return 0;

stop:
	scp->data->scp_reset_assert(scp);
	clk_disable_unprepare(scp->clk);
	return ret;
}

static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
	int offset;

	if (da < scp->sram_size) {
		offset = da;
		if (offset >= 0 && (offset + len) < scp->sram_size)
			return (void __force *)scp->sram_base + offset;
	} else if (scp->dram_size) {
		offset = da - scp->dma_addr;
		if (offset >= 0 && (offset + len) < scp->dram_size)
			return (void __force *)scp->cpu_addr + offset;
	}

	return NULL;
}

static void mt8183_scp_stop(struct mtk_scp *scp)
{
	/* Disable SCP watchdog */
	writel(0, scp->reg_base + MT8183_WDT_CFG);
}

static void mt8192_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
	mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
	mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
	mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
	mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);

	/* Disable SCP watchdog */
	writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
}

static int scp_stop(struct rproc *rproc)
{
	struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return ret;
	}

	scp->data->scp_reset_assert(scp);
	scp->data->scp_stop(scp);
	clk_disable_unprepare(scp->clk);

	return 0;
}

static const struct rproc_ops scp_ops = {
	.start = scp_start,
	.stop = scp_stop,
	.load = scp_load,
	.da_to_va = scp_da_to_va,
};

/**
 * scp_get_device() - get device struct of SCP
 *
 * @scp: mtk_scp structure
 **/
struct device *scp_get_device(struct mtk_scp *scp)
{
	return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);

/**
 * scp_get_rproc() - get rproc struct of SCP
 *
 * @scp: mtk_scp structure
 **/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
	return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);

/**
 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 *
 * @scp: mtk_scp structure
 *
 * Return: video decoder hardware capability
 **/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
	return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);

/**
 * scp_get_venc_hw_capa() - get video encoder hardware capability
 *
 * @scp: mtk_scp structure
 *
 * Return: video encoder hardware capability
 **/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
	return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
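
/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * driver): once scp_start() has completed and the init IPI has filled in
 * scp->run, a codec driver could query the reported capabilities:
 *
 *	unsigned int vdec_capa, venc_capa;
 *
 *	vdec_capa = scp_get_vdec_hw_capa(scp);
 *	venc_capa = scp_get_venc_hw_capa(scp);
 */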

/**
 * scp_mapping_dm_addr() - Map SRAM/DRAM to a kernel virtual address
 *
 * @scp: mtk_scp structure
 * @mem_addr: memory address as seen by the SCP
 *
 * Map the SCP's SRAM address, DMEM (Data Extended Memory) address or
 * working buffer address to a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if the mapping failed,
 * otherwise the mapped kernel virtual address
 **/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
	void *ptr;

	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
	if (!ptr)
		return ERR_PTR(-EINVAL);

	return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
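
/*
 * Illustrative usage sketch (hypothetical client code): translating an
 * SCP-side buffer address received over IPI into a kernel pointer. The
 * scp_buf_da name is an assumption made for the example only.
 *
 *	void *va;
 *
 *	va = scp_mapping_dm_addr(scp, scp_buf_da);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */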

static int scp_map_memory_region(struct mtk_scp *scp)
{
	int ret;

	ret = of_reserved_mem_device_init(scp->dev);

	/* reserved memory is optional. */
	if (ret == -ENODEV) {
		dev_info(scp->dev, "skipping reserved memory initialization.\n");
		return 0;
	}

	if (ret) {
		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
		return -ENOMEM;
	}

	/* Reserved SCP code size */
	scp->dram_size = MAX_CODE_SIZE;
	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
					   &scp->dma_addr, GFP_KERNEL);
	if (!scp->cpu_addr)
		return -ENOMEM;

	return 0;
}

static void scp_unmap_memory_region(struct mtk_scp *scp)
{
	if (scp->dram_size == 0)
		return;

	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
			  scp->dma_addr);
	of_reserved_mem_device_release(scp->dev);
}

static int scp_register_ipi(struct platform_device *pdev, u32 id,
			    ipi_handler_t handler, void *priv)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_register(scp, id, handler, priv);
}

static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	scp_ipi_unregister(scp, id);
}

static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
			unsigned int len, unsigned int wait)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_send(scp, id, buf, len, wait);
}

static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
	.send_ipi = scp_send_ipi,
	.register_ipi = scp_register_ipi,
	.unregister_ipi = scp_unregister_ipi,
	.ns_ipi_id = SCP_IPI_NS_SERVICE,
};

static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
	scp->rpmsg_subdev =
		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
					      &mtk_scp_rpmsg_info);
	if (scp->rpmsg_subdev)
		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}

static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
	if (scp->rpmsg_subdev) {
		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
		scp->rpmsg_subdev = NULL;
	}
}

static int scp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_scp *scp;
	struct rproc *rproc;
	struct resource *res;
	const char *fw_name = "scp.img";
	int ret, i;

	rproc = rproc_alloc(dev,
			    np->name,
			    &scp_ops,
			    fw_name,
			    sizeof(*scp));
	if (!rproc) {
		dev_err(dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}

	scp = (struct mtk_scp *)rproc->priv;
	scp->rproc = rproc;
	scp->dev = dev;
	scp->data = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, scp);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	scp->sram_base = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)scp->sram_base)) {
		dev_err(dev, "Failed to parse and map sram memory\n");
		ret = PTR_ERR((__force void *)scp->sram_base);
		goto free_rproc;
	}
	scp->sram_size = resource_size(res);

	mutex_init(&scp->send_lock);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_init(&scp->ipi_desc[i].lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	scp->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)scp->reg_base)) {
		dev_err(dev, "Failed to parse and map cfg memory\n");
		ret = PTR_ERR((__force void *)scp->reg_base);
		goto destroy_mutex;
	}

	ret = scp_map_memory_region(scp);
	if (ret)
		goto destroy_mutex;

	scp->clk = devm_clk_get(dev, "main");
	if (IS_ERR(scp->clk)) {
		dev_err(dev, "Failed to get clock\n");
		ret = PTR_ERR(scp->clk);
		goto release_dev_mem;
	}

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		goto release_dev_mem;
	}

	ret = scp_ipi_init(scp);
	clk_disable_unprepare(scp->clk);
	if (ret) {
		dev_err(dev, "Failed to init ipi\n");
		goto release_dev_mem;
	}

	/* register SCP initialization IPI */
	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
	if (ret) {
		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
		goto release_dev_mem;
	}

	init_waitqueue_head(&scp->run.wq);
	init_waitqueue_head(&scp->ack_wq);

	scp_add_rpmsg_subdev(scp);

	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
					scp_irq_handler, IRQF_ONESHOT,
					pdev->name, scp);

	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto remove_subdev;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_subdev;

	return 0;

remove_subdev:
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
	scp_unmap_memory_region(scp);
destroy_mutex:
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);
free_rproc:
	rproc_free(rproc);

	return ret;
}

static int scp_remove(struct platform_device *pdev)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);
	int i;

	rproc_del(scp->rproc);
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);
	rproc_free(scp->rproc);

	return 0;
}

static const struct mtk_scp_of_data mt8183_of_data = {
	.scp_before_load = mt8183_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
};

static const struct mtk_scp_of_data mt8192_of_data = {
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};

static const struct of_device_id mtk_scp_of_match[] = {
	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
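
/*
 * Illustrative device tree sketch, inferred only from the resources looked
 * up in scp_probe() ("sram" and "cfg" regions, a "main" clock, one
 * interrupt) and the "mediatek,scp" phandle consumed by scp_get(). The
 * addresses, clock specifier and client node below are hypothetical; the
 * DT bindings are the authoritative reference.
 *
 *	scp: scp@10500000 {
 *		compatible = "mediatek,mt8183-scp";
 *		reg = <0x10500000 0x80000>, <0x105c0000 0x19080>;
 *		reg-names = "sram", "cfg";
 *		clocks = <&infracfg CLK_INFRA_SCPSYS>;
 *		clock-names = "main";
 *		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_LOW>;
 *	};
 *
 *	codec_client {
 *		...
 *		mediatek,scp = <&scp>;
 *	};
 */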

static struct platform_driver mtk_scp_driver = {
	.probe = scp_probe,
	.remove = scp_remove,
	.driver = {
		.name = "mtk-scp",
		.of_match_table = mtk_scp_of_match,
	},
};

module_platform_driver(mtk_scp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");