Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char *qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
	};

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
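
/*
 * Illustrative sketch, not part of the original file: a cpuidle back end
 * would typically install the warm-boot entry for a CPU before allowing it
 * to power-collapse. The entry point is platform code such as the ARM
 * resume trampoline (cpu_resume_arm in the upstream qcom SPM driver); it is
 * passed in by the caller here to keep the example self-contained.
 */
static int __maybe_unused example_install_warm_boot(void *resume_entry, int cpu)
{
	return qcom_scm_set_warm_boot_addr(resume_entry, cpumask_of(cpu));
}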

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	if (!cpus || (cpus && cpumask_empty(cpus)))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
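
/*
 * Illustrative sketch, not part of the original file: SMP bring-up code
 * points every present CPU at the secondary startup trampoline once, before
 * the secondaries are released (e.g. secondary_startup_arm on 32-bit
 * Qualcomm platforms). The entry pointer is left to the caller here.
 */
static int __maybe_unused example_install_cold_boot(void *secondary_entry)
{
	return qcom_scm_set_cold_boot_addr(secondary_entry, cpu_present_mask);
}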

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
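
/*
 * Illustrative sketch, not part of the original file: after the warm-boot
 * entry has been installed, the idle path terminates power collapse from the
 * CPU that is going down. QCOM_SCM_CPU_PWR_DOWN_L2_ON/_OFF come from
 * <linux/qcom_scm.h> and select whether the L2 cache stays powered.
 */
static void __maybe_unused example_cpu_power_down(bool l2_off)
{
	qcom_scm_cpu_power_down(l2_off ? QCOM_SCM_CPU_PWR_DOWN_L2_OFF :
					 QCOM_SCM_CPU_PWR_DOWN_L2_ON);
}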

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
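
/*
 * Illustrative sketch, not part of the original file: the usual PAS flow in
 * a remoteproc driver first hands the firmware metadata (the *.mdt blob,
 * typically obtained with request_firmware()) to the secure world and then
 * describes the physical region the firmware segments will be loaded into.
 */
static int __maybe_unused example_pas_prepare(u32 pas_id,
					      const void *metadata, size_t metadata_len,
					      phys_addr_t mem_phys, size_t mem_size)
{
	int ret;

	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len);
	if (ret)
		return ret;

	return qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
}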

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
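
/*
 * Illustrative sketch, not part of the original file: once the firmware
 * segments have been copied into the prepared region, authentication brings
 * the remote processor out of reset; qcom_scm_pas_shutdown() is the matching
 * teardown. Callers normally gate all of this on qcom_scm_pas_supported().
 */
static int __maybe_unused example_pas_start_stop(u32 pas_id, bool start)
{
	return start ? qcom_scm_pas_auth_and_reset(pas_id) :
		       qcom_scm_pas_shutdown(pas_id);
}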

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
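
/*
 * Illustrative sketch, not part of the original file: the two I/O helpers
 * above are usually combined into a read-modify-write of a secure register.
 * The register address and bit number are caller-supplied placeholders here.
 */
static int __maybe_unused example_secure_reg_set_bit(phys_addr_t reg, unsigned int bit)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);
	if (ret)
		return ret;

	return qcom_scm_io_writel(reg, val | BIT(bit));
}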

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
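
/*
 * Illustrative sketch, not part of the original file: client drivers (for
 * example IOMMU code) re-apply the secure configuration after the hardware
 * may have lost it, such as around a power collapse. The sec_id value is
 * firmware-defined per SoC and left to the caller; the second argument
 * ("spare") is normally 0.
 */
static int __maybe_unused example_restore_sec_cfg(u32 sec_id)
{
	if (!qcom_scm_restore_sec_cfg_available())
		return -EOPNOTSUPP;

	return qcom_scm_restore_sec_cfg(sec_id, 0);
}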

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
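
/*
 * Illustrative sketch, not part of the original file: the two secure
 * page-table helpers above are used as a pair - query how much memory the
 * secure world wants, allocate it physically contiguous, then hand it over
 * (the buffer stays owned by the secure side on success). This mirrors how
 * the qcom_iommu driver uses them; error handling is trimmed for brevity.
 */
static int __maybe_unused example_sec_ptbl_setup(struct device *dev, u32 spare)
{
	size_t psize = 0;
	dma_addr_t paddr;
	void *cpu_addr;
	int ret;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret)
		return ret;

	cpu_addr = dma_alloc_coherent(dev, psize, &paddr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret)
		dma_free_coherent(dev, psize, cpu_addr, paddr);

	return ret;
}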

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  * @mem_addr: mem region whose ownership need to be reassigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * @mem_sz:   size of the region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * @srcvm:    vmid bitmap for the current set of owners, each set bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *            in the flag indicates a unique owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  * @newvm:    array having new owners and corresponding permission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  *            flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * @dest_cnt: number of owners in next set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * Return: negative errno on failure or 0 on success, with @srcvm updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			unsigned int *srcvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			const struct qcom_scm_vmperm *newvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			unsigned int dest_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct qcom_scm_current_perm_info *destvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	struct qcom_scm_mem_map_info *mem_to_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	phys_addr_t mem_to_map_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	phys_addr_t dest_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	dma_addr_t ptr_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	size_t mem_to_map_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	size_t dest_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	size_t src_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	size_t ptr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	int next_vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	__le32 *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	int ret, i, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	unsigned long srcvm_bits = *srcvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	mem_to_map_sz = sizeof(*mem_to_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	dest_sz = dest_cnt * sizeof(*destvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			ALIGN(dest_sz, SZ_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	/* Fill in the source vmid details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	src = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		src[i++] = cpu_to_le32(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/* Fill in the details of the memory buffer to map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	mem_to_map->mem_size = cpu_to_le64(mem_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	next_vm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* Fill in the details of the destination vmids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		destvm->vmid = cpu_to_le32(newvm->vmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		destvm->perm = cpu_to_le32(newvm->perm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		destvm->ctx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		destvm->ctx_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		next_vm |= BIT(newvm->vmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				    ptr_phys, src_sz, dest_phys, dest_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		dev_err(__scm->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			"Assign memory protection call failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	*srcvm = next_vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) EXPORT_SYMBOL(qcom_scm_assign_mem);
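
/*
 * Editor's note: the function below is an illustrative usage sketch added to
 * this listing, not part of the original file. It mirrors the common pattern
 * of handing a physically contiguous carveout from HLOS to another VM (the
 * vmid and permission constants come from <linux/qcom_scm.h>); the carveout
 * address and size are assumed to be provided by the caller.
 */
static int __maybe_unused qcom_scm_assign_mem_example(phys_addr_t addr,
						      size_t size)
{
	/* current owners: one bit per vmid, HLOS only to start with */
	unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
	/* new owner: the modem subsystem, with read/write access */
	struct qcom_scm_vmperm newvm = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};

	/* on success, perms is updated to the new owner bitmap */
	return qcom_scm_assign_mem(addr, size, &perms, &newvm, 1);
}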
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) bool qcom_scm_ocmem_lock_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 					    QCOM_SCM_OCMEM_LOCK_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * region to the specified initiator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * @id:     tz initiator id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * @offset: OCMEM offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * @size:   OCMEM size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * @mode:   access mode (WIDE/NARROW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			u32 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		.svc = QCOM_SCM_SVC_OCMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		.args[0] = id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		.args[1] = offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		.args[2] = size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		.args[3] = mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		.arginfo = QCOM_SCM_ARGS(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	return qcom_scm_call(__scm->dev, &desc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) EXPORT_SYMBOL(qcom_scm_ocmem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * region from the specified initiator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * @id:     tz initiator id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * @offset: OCMEM offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * @size:   OCMEM size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		.svc = QCOM_SCM_SVC_OCMEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		.args[0] = id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		.args[1] = offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		.args[2] = size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		.arginfo = QCOM_SCM_ARGS(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	return qcom_scm_call(__scm->dev, &desc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
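
/*
 * Editor's note: an illustrative sketch added to this listing (not part of
 * the original file) showing the lock/unlock interfaces above used as a
 * pair, guarded by the availability check. The graphics initiator id comes
 * from <linux/qcom_scm.h>; offset, size and the WIDE/NARROW mode encoding
 * are assumed to be supplied by the caller.
 */
static int __maybe_unused qcom_scm_ocmem_example(u32 offset, u32 size, u32 mode)
{
	int ret;

	if (!qcom_scm_ocmem_lock_available())
		return -ENODEV;

	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size,
				  mode);
	if (ret)
		return ret;

	/* ... the initiator may now use the OCMEM region ... */

	return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
}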
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  * qcom_scm_ice_available() - Is the ICE key programming interface available?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  *	   qcom_scm_ice_set_key() are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) bool qcom_scm_ice_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) EXPORT_SYMBOL(qcom_scm_ice_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * @index: the keyslot to invalidate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  * The UFSHCI and eMMC standards define a standard way to do this, but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * doesn't work on these SoCs; only this SCM call does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * It is assumed that the SoC has only one ICE instance being used, as this SCM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * call doesn't specify which ICE instance the keyslot belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * Return: 0 on success; -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) int qcom_scm_ice_invalidate_key(u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		.svc = QCOM_SCM_SVC_ES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		.arginfo = QCOM_SCM_ARGS(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		.args[0] = index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		.owner = ARM_SMCCC_OWNER_SIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return qcom_scm_call(__scm->dev, &desc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * qcom_scm_ice_set_key() - Set an inline encryption key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * @index: the keyslot into which to set the key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * @key: the key to program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * @key_size: the size of the key in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * @cipher: the encryption algorithm the key is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * @data_unit_size: the encryption data unit size, i.e. the size of each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  *		    individual plaintext and ciphertext.  Given in 512-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * The UFSHCI and eMMC standards define a standard way to do this, but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * doesn't work on these SoCs; only this SCM call does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * It is assumed that the SoC has only one ICE instance being used, as this SCM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * call doesn't specify which ICE instance the keyslot belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * Return: 0 on success; -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		.svc = QCOM_SCM_SVC_ES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 					 QCOM_SCM_VAL, QCOM_SCM_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 					 QCOM_SCM_VAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		.args[0] = index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		.args[2] = key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		.args[3] = cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		.args[4] = data_unit_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		.owner = ARM_SMCCC_OWNER_SIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	void *keybuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	dma_addr_t key_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * physical address that's been properly flushed.  The sanctioned way to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * do this is by using the DMA API.  But as is best practice for crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 * keys, we also must wipe the key after use.  This makes kmemdup() +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * dma_map_single() not clearly correct, since the DMA API can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * keys is normally rare and thus not performance-critical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 				    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (!keybuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	memcpy(keybuf, key, key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	desc.args[1] = key_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	memzero_explicit(keybuf, key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) EXPORT_SYMBOL(qcom_scm_ice_set_key);
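
/*
 * Editor's note: an illustrative sketch added to this listing (not part of
 * the original file) of how a storage driver might program and later evict
 * an ICE keyslot. The keyslot number and key buffer are assumed to come
 * from the caller; AES-256-XTS and a 4096-byte data unit (8 * 512) are
 * example choices only.
 */
static int __maybe_unused qcom_scm_ice_example(u32 slot, const u8 *key,
					       u32 key_size)
{
	int ret;

	if (!qcom_scm_ice_available())
		return -ENODEV;

	ret = qcom_scm_ice_set_key(slot, key, key_size,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
	if (ret)
		return ret;

	/* ... inline-encrypted I/O may now use this keyslot ... */

	return qcom_scm_ice_invalidate_key(slot);
}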
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * Return true if HDCP is supported, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) bool qcom_scm_hdcp_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	bool avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	int ret = qcom_scm_clk_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 						QCOM_SCM_HDCP_INVOKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	qcom_scm_clk_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	return avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) EXPORT_SYMBOL(qcom_scm_hdcp_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * qcom_scm_hdcp_req() - Send HDCP request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * @req: HDCP request array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * @req_cnt: HDCP request array count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * @resp: response buffer passed to SCM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  * Write HDCP register(s) through SCM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		.svc = QCOM_SCM_SVC_HDCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		.cmd = QCOM_SCM_HDCP_INVOKE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		.arginfo = QCOM_SCM_ARGS(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		.args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			req[0].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			req[0].val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			req[1].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			req[1].val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			req[2].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			req[2].val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			req[3].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			req[3].val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			req[4].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			req[4].val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		.owner = ARM_SMCCC_OWNER_SIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct qcom_scm_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	ret = qcom_scm_clk_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	ret = qcom_scm_call(__scm->dev, &desc, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	*resp = res.result[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	qcom_scm_clk_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) EXPORT_SYMBOL(qcom_scm_hdcp_req);
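
/*
 * Editor's note: an illustrative sketch added to this listing (not part of
 * the original file) of issuing a single HDCP register write through the
 * interface above. The register address and value are placeholders from the
 * caller; unused request slots are left zeroed, matching the fixed
 * ten-argument layout of the SCM call.
 */
static int __maybe_unused qcom_scm_hdcp_example(u32 reg, u32 val, u32 *resp)
{
	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
		{ .addr = reg, .val = val },
	};

	if (!qcom_scm_hdcp_available())
		return -ENODEV;

	return qcom_scm_hdcp_req(req, 1, resp);
}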
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	struct qcom_scm_desc desc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		.arginfo = QCOM_SCM_ARGS(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		.args[1] = en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		.owner = ARM_SMCCC_OWNER_SIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	struct device_node *tcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (!tcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	ret = of_address_to_resource(tcsr, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	of_node_put(tcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	*addr = res.start + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * qcom_scm_is_available() - Checks if SCM is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) bool qcom_scm_is_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	return !!__scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) EXPORT_SYMBOL(qcom_scm_is_available);
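
/*
 * Editor's note: a short sketch added to this listing (not part of the
 * original file) of the usual consumer-side pattern: drivers that depend on
 * SCM defer their own probe until this driver has bound and set __scm.
 */
static int __maybe_unused qcom_scm_consumer_probe_example(void)
{
	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* ... SCM-backed setup can safely proceed from here ... */
	return 0;
}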
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static int qcom_scm_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	struct qcom_scm *scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	unsigned long clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (!scm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	scm->core_clk = devm_clk_get(&pdev->dev, "core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (IS_ERR(scm->core_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			return PTR_ERR(scm->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		if (clks & SCM_HAS_CORE_CLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			dev_err(&pdev->dev, "failed to acquire core clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			return PTR_ERR(scm->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		scm->core_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	if (IS_ERR(scm->iface_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			return PTR_ERR(scm->iface_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		if (clks & SCM_HAS_IFACE_CLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			dev_err(&pdev->dev, "failed to acquire iface clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			return PTR_ERR(scm->iface_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		scm->iface_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (IS_ERR(scm->bus_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			return PTR_ERR(scm->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (clks & SCM_HAS_BUS_CLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			dev_err(&pdev->dev, "failed to acquire bus clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			return PTR_ERR(scm->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		scm->bus_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	scm->reset.ops = &qcom_scm_pas_reset_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	scm->reset.nr_resets = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	scm->reset.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	/* vote for max clk rate for highest performance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	ret = clk_set_rate(scm->core_clk, INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	__scm = scm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	__scm->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	__get_convention();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	 * If requested, enable "download mode": from this point on a warm boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	 * will cause the boot stages to enter download mode, unless it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	 * disabled below by a clean shutdown/reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (download_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		qcom_scm_set_download_mode(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static void qcom_scm_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	/* Clean shutdown, disable download mode to allow normal restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (download_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		qcom_scm_set_download_mode(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static const struct of_device_id qcom_scm_dt_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	{ .compatible = "qcom,scm-apq8064",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 							     SCM_HAS_IFACE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 							     SCM_HAS_BUS_CLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	{ .compatible = "qcom,scm-ipq4019" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 							     SCM_HAS_IFACE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 							     SCM_HAS_BUS_CLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 							     SCM_HAS_IFACE_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 							     SCM_HAS_BUS_CLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	{ .compatible = "qcom,scm-msm8994" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	{ .compatible = "qcom,scm-msm8996" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	{ .compatible = "qcom,scm" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static struct platform_driver qcom_scm_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		.name	= "qcom_scm",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		.of_match_table = qcom_scm_dt_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	.probe = qcom_scm_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	.shutdown = qcom_scm_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static int __init qcom_scm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	return platform_driver_register(&qcom_scm_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) subsys_initcall(qcom_scm_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) MODULE_LICENSE("GPL v2");