^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2019 MediaTek Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Stanley Chu <stanley.chu@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Peter Wang <peter.wang@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/arm-smccc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/soc/mediatek/mtk_sip_svc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "ufshcd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "ufshcd-crypto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "ufshcd-pltfrm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "ufs_quirks.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "unipro.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "ufs-mediatek.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "ufs-mediatek-trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/*
 * Issue a MediaTek UFS SIP (Silicon Partner) call into the secure world
 * (TF-A) via SMC. @res receives the firmware return values; callers check
 * res.a0 for the firmware status.
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Ask secure firmware to switch the VA09 power rail on/off. */
#define ufs_mtk_va09_pwr_ctrl(res, on) \
	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)

/* Ask secure firmware to enable/disable the inline crypto engine. */
#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

/* Notify secure firmware that the reference clock state changed. */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset line high/low through secure firmware. */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
/* Per-device quirk table applied to attached UFS devices by vendor/model. */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) ufshcd_dme_get(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) tmp = tmp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) (1 << RX_SYMBOL_CLK_GATE_EN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) (1 << SYS_CLK_GATE_EN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) (1 << TX_CLK_GATE_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) ufshcd_dme_get(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ufshcd_dme_get(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) (1 << SYS_CLK_GATE_EN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) (1 << TX_CLK_GATE_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) ufshcd_dme_get(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) ufs_mtk_crypto_ctrl(res, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (res.a0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) __func__, res.a0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) hba->caps &= ~UFSHCD_CAP_CRYPTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static void ufs_mtk_host_reset(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) reset_control_assert(host->hci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) reset_control_assert(host->crypto_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) reset_control_assert(host->unipro_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) usleep_range(100, 110);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) reset_control_deassert(host->unipro_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) reset_control_deassert(host->crypto_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) reset_control_deassert(host->hci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct reset_control **rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) *rc = devm_reset_control_get(hba->dev, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) if (IS_ERR(*rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) str, PTR_ERR(*rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) *rc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static void ufs_mtk_init_reset(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) ufs_mtk_init_reset_control(hba, &host->hci_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) "hci_rst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) ufs_mtk_init_reset_control(hba, &host->unipro_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) "unipro_rst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) ufs_mtk_init_reset_control(hba, &host->crypto_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) "crypto_rst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) enum ufs_notify_change_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) if (status == PRE_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (host->unipro_lpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) hba->vps->hba_enable_delay_us = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) hba->vps->hba_enable_delay_us = 600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) ufs_mtk_host_reset(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (hba->caps & UFSHCD_CAP_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) ufs_mtk_crypto_enable(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) spin_lock_irqsave(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) ufshcd_writel(hba, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) REG_AUTO_HIBERNATE_IDLE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) spin_unlock_irqrestore(hba->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) hba->ahit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) struct device *dev = hba->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) host->mphy = devm_of_phy_get_by_index(dev, np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * UFS driver might be probed before the phy driver does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * In that case we would like to return EPROBE_DEFER code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) err = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) dev_info(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) "%s: required phy hasn't probed yet. err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) } else if (IS_ERR(host->mphy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) err = PTR_ERR(host->mphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (err != -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) dev_info(dev, "%s: PHY get failed %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) host->mphy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * Allow unbound mphy because not every platform needs specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * mphy control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (err == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
/*
 * Request or release the UFS device reference clock and wait for the
 * hardware handshake (ack bit mirroring the request bit in
 * REG_UFS_REFCLK_CTRL).
 *
 * Secure firmware is notified *before* ungating (so the clock source is
 * ready) and *after* gating (so it is not cut while still in use). On a
 * handshake timeout the firmware is re-notified with the previous state
 * and -ETIMEDOUT is returned.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Nothing to do if already in the requested state */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll firmware back to the previous (unchanged) clock state */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Keep the clock alive for the gating wait, then notify */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) u16 gating_us, u16 ungating_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) if (hba->dev_info.clk_gating_wait_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) host->ref_clk_gating_wait_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) hba->dev_info.clk_gating_wait_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) host->ref_clk_gating_wait_us = gating_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) host->ref_clk_ungating_wait_us = ungating_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) unsigned long max_wait_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) ktime_t timeout, time_checked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) timeout = ktime_add_ms(ktime_get(), max_wait_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) time_checked = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) val = ufshcd_readl(hba, REG_UFS_PROBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) val = val >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) if (val == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /* Sleep for max. 200us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) usleep_range(100, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) } while (ktime_before(time_checked, timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (val == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
/*
 * Power the M-PHY on or off, including the optional VA09 rail.
 *
 * Power-on order: VA09 regulator -> settle -> firmware VA09 notify -> PHY.
 * Power-off is the exact reverse. A no-op when no M-PHY is bound or the
 * PHY is already in the requested state.
 *
 * Returns 0 on success or the regulator error code; host->mphy_powered_on
 * is only updated on success.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* No PHY bound, or already in the requested power state */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stablize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
/*
 * Fetch a named clock for @dev. On success store it in @clk_out and return
 * 0; on failure return the clk error code and leave @clk_out untouched.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
/*
 * Switch the crypto engine between performance and low-power operating
 * points.
 *
 * Boost: raise vcore to the configured minimum first, then reparent the
 * crypto clock mux to the performance parent (rolling vcore back if the
 * reparent fails). Unboost: reparent to the low-power parent first, then
 * drop the vcore request. The mux clock is kept prepared/enabled across
 * the reparent and released on every exit path.
 *
 * No-op unless UFS_MTK_CAP_BOOST_CRYPT_ENGINE is set.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		/* Raise vcore before switching to the fast parent */
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the vcore boost on failure */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		/* Switch to the slow parent before dropping vcore */
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct clk **clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
/*
 * Probe the resources needed for crypto-engine boosting: the dvfsrc-vcore
 * regulator, the "boost-crypt-vcore-min" DT property, and the crypt mux /
 * low-power / performance clocks.
 *
 * UFS_MTK_CAP_BOOST_CRYPT_ENGINE is set only if everything is found; any
 * missing piece silently leaves the capability disabled (boosting is an
 * optional optimization, not a hard requirement).
 */
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	/* Capability bit simply stays clear on any failure above */
	return;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) host->reg_va09 = regulator_get(hba->dev, "va09");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (!host->reg_va09)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) dev_info(hba->dev, "failed to get va09");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct device_node *np = hba->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) ufs_mtk_init_boost_crypt(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) ufs_mtk_init_va09_pwr_ctrl(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) host->caps |= UFS_MTK_CAP_DISABLE_AH8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) host->caps |= UFS_MTK_CAP_BROKEN_VCC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) dev_info(hba->dev, "caps: 0x%x", host->caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * ufs_mtk_setup_clocks - enables/disable clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * @hba: host controller instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * @on: If true, enable clocks else disable them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * @status: PRE_CHANGE or POST_CHANGE notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * Returns 0 on success, non-zero on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) enum ufs_notify_change_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) bool clk_pwr_off = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * In case ufs_mtk_init() is not yet done, simply ignore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * This ufs_mtk_setup_clocks() shall be called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * ufs_mtk_init() after init is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (!on && status == PRE_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (ufshcd_is_link_off(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) clk_pwr_off = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) } else if (ufshcd_is_link_hibern8(hba) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) (!ufshcd_can_hibern8_during_gating(hba) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) ufshcd_is_auto_hibern8_enabled(hba))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * Gate ref-clk and poweroff mphy if link state is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * OFF or Hibern8 by either Auto-Hibern8 or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * ufshcd_link_state_transition().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) ret = ufs_mtk_wait_link_state(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) VS_LINK_HIBERN8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) clk_pwr_off = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (clk_pwr_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ufs_mtk_boost_crypt(hba, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) ufs_mtk_setup_ref_clk(hba, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) phy_power_off(host->mphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) } else if (on && status == POST_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) phy_power_on(host->mphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) ufs_mtk_setup_ref_clk(hba, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) ufs_mtk_boost_crypt(hba, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) int ret, ver = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (host->hw_ver.major)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Set default (minimum) version anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) host->hw_ver.major = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (ver >= UFS_UNIPRO_VER_1_8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) host->hw_ver.major = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * Fix HCI version for some platforms with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * incorrect version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (hba->ufs_version < ufshci_version(3, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) hba->ufs_version = ufshci_version(3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return hba->ufs_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * ufs_mtk_init - find other essential mmio bases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * @hba: host controller instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * Binds PHY with controller and powers up PHY enabling clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * and regulators.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * Returns -EPROBE_DEFER if binding fails, returns negative error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * on phy power up failure and returns zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) static int ufs_mtk_init(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) const struct of_device_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct device *dev = hba->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct ufs_mtk_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) host->hba = hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) ufshcd_set_variant(hba, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) id = of_match_device(ufs_mtk_of_match, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (!id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /* Initialize host capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) ufs_mtk_init_host_caps(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) err = ufs_mtk_bind_mphy(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) goto out_variant_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) ufs_mtk_init_reset(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /* Enable runtime autosuspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* Enable clock-gating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) hba->caps |= UFSHCD_CAP_CLK_GATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /* Enable inline encryption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) hba->caps |= UFSHCD_CAP_CRYPTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /* Enable WriteBooster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) hba->caps |= UFSHCD_CAP_WB_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * ufshcd_vops_init() is invoked after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * phy clock setup is skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Enable phy clocks specifically here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ufs_mtk_mphy_power_on(hba, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) out_variant_clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ufshcd_set_variant(hba, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct ufs_pa_layer_attr *dev_max_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) struct ufs_pa_layer_attr *dev_req_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct ufs_dev_params host_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) u32 adapt_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) host_cap.desired_working_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) UFS_MTK_LIMIT_DESIRED_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ret = ufshcd_get_pwr_dev_param(&host_cap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) dev_max_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dev_req_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) pr_info("%s: failed to determine capabilities\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (host->hw_ver.major >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (dev_req_params->gear_tx == UFS_HS_G4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) adapt_val = PA_INITIAL_ADAPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) adapt_val = PA_NO_ADAPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) UIC_ARG_MIB(PA_TXHSADAPTTYPE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) adapt_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) enum ufs_notify_change_status stage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct ufs_pa_layer_attr *dev_max_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct ufs_pa_layer_attr *dev_req_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) switch (stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) case PRE_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dev_req_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) case POST_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct ufs_mtk_host *host = ufshcd_get_variant(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ret = ufshcd_dme_set(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!ret || !lpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * Forcibly set as non-LPM mode if UIC commands is failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * to use default hba_enable_delay_us value for re-enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) host->unipro_lpm = lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static int ufs_mtk_pre_link(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ufs_mtk_get_controller_version(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ret = ufs_mtk_unipro_set_pm(hba, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * Setting PA_Local_TX_LCC_Enable to 0 before link startup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * to make sure that both host and device TX LCC are disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * once link startup is completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ret = ufshcd_disable_host_tx_lcc(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* disable deep stall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) tmp &= ~(1 << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) u32 ah_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (ufshcd_is_clkgating_allowed(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) hba->ahit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ah_ms = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) spin_lock_irqsave(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) hba->clk_gating.delay_ms = ah_ms + 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) spin_unlock_irqrestore(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static int ufs_mtk_post_link(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* enable unipro clock gating feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ufs_mtk_cfg_unipro_cg(hba, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* configure auto-hibern8 timer to 10ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (ufshcd_is_auto_hibern8_supported(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ufshcd_auto_hibern8_update(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ufs_mtk_setup_clk_gating(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) enum ufs_notify_change_status stage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) switch (stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) case PRE_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = ufs_mtk_pre_link(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) case POST_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) ret = ufs_mtk_post_link(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int ufs_mtk_device_reset(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* disable hba before device reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ufshcd_hba_stop(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ufs_mtk_device_reset_ctrl(0, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * The reset signal is active low. UFS devices shall detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * more than or equal to 1us of positive or negative RST_n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * pulse width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * To be on safe side, keep the reset low for at least 10us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) usleep_range(10, 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ufs_mtk_device_reset_ctrl(1, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Some devices may need time to respond to rst_n */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) usleep_range(10000, 15000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dev_info(hba->dev, "device reset done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) err = ufshcd_hba_enable(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err = ufs_mtk_unipro_set_pm(hba, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) err = ufshcd_uic_hibern8_exit(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ufshcd_set_link_active(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) err = ufshcd_make_hba_operational(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) err = ufs_mtk_unipro_set_pm(hba, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Resume UniPro state for following error recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ufs_mtk_unipro_set_pm(hba, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (lpm && !hba->vreg_info.vcc->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) regulator_set_mode(hba->vreg_info.vccq2->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) REGULATOR_MODE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) else if (!lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) regulator_set_mode(hba->vreg_info.vccq2->reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) REGULATOR_MODE_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct arm_smccc_res res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (ufshcd_is_link_hibern8(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err = ufs_mtk_link_set_lpm(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (!ufshcd_is_link_active(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * Make sure no error will be returned to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * ufshcd_suspend() re-enabling regulators while vreg is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * in low-power mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ufs_mtk_vreg_set_lpm(hba, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) err = ufs_mtk_mphy_power_on(hba, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (ufshcd_is_link_off(hba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ufs_mtk_device_reset_ctrl(0, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * Set link as off state enforcedly to trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * ufshcd_host_reset_and_restore() in ufshcd_suspend()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * for completed host reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ufshcd_set_link_off(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) err = ufs_mtk_mphy_power_on(hba, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ufs_mtk_vreg_set_lpm(hba, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (ufshcd_is_link_hibern8(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) err = ufs_mtk_link_set_hpm(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return ufshcd_link_recovery(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) "MPHY Ctrl ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Direct debugging information to REG_MTK_PROBE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct ufs_dev_info *dev_info = &hba->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) u16 mid = dev_info->wmanufacturerid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (mid == UFS_VENDOR_SAMSUNG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * Decide waiting time before gating reference clock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * after ungating reference clock according to vendors'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * requirements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (mid == UFS_VENDOR_SAMSUNG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) else if (mid == UFS_VENDOR_SKHYNIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) else if (mid == UFS_VENDOR_TOSHIBA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) hba->vreg_info.vcc->always_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * VCC will be kept always-on thus we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * need any delay during regulator operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void ufs_mtk_event_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) enum ufs_event_type evt, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) unsigned int val = *(u32 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) trace_ufs_mtk_event(evt, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * The variant operations configure the necessary controller and PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * handshake during initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .name = "mediatek.ufshci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .init = ufs_mtk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) .setup_clocks = ufs_mtk_setup_clocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) .hce_enable_notify = ufs_mtk_hce_enable_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .link_startup_notify = ufs_mtk_link_startup_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .pwr_change_notify = ufs_mtk_pwr_change_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) .suspend = ufs_mtk_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .resume = ufs_mtk_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) .dbg_register_dump = ufs_mtk_dbg_register_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) .device_reset = ufs_mtk_device_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) .event_notify = ufs_mtk_event_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * ufs_mtk_probe - probe routine of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @pdev: pointer to Platform device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * Return zero for success and non-zero for failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int ufs_mtk_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct device_node *reset_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct platform_device *reset_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct device_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) reset_node = of_find_compatible_node(NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) "ti,syscon-reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!reset_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dev_notice(dev, "find ti,syscon-reset fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) goto skip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) reset_pdev = of_find_device_by_node(reset_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (!reset_pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_notice(dev, "find reset_pdev fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto skip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) link = device_link_add(dev, &reset_pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) DL_FLAG_AUTOPROBE_CONSUMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dev_notice(dev, "add reset device_link fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto skip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /* supplier is not probed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (link->status == DL_STATE_DORMANT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) err = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) skip_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* perform generic probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dev_info(dev, "probe failed %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) of_node_put(reset_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * ufs_mtk_remove - set driver_data of the device to NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * @pdev: pointer to platform device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * Always return 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static int ufs_mtk_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct ufs_hba *hba = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) pm_runtime_get_sync(&(pdev)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ufshcd_remove(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static const struct dev_pm_ops ufs_mtk_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .suspend = ufshcd_pltfrm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .resume = ufshcd_pltfrm_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .runtime_resume = ufshcd_pltfrm_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .runtime_idle = ufshcd_pltfrm_runtime_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static struct platform_driver ufs_mtk_pltform = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .probe = ufs_mtk_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .remove = ufs_mtk_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) .shutdown = ufshcd_pltfrm_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .name = "ufshcd-mtk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) .pm = &ufs_mtk_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) .of_match_table = ufs_mtk_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) MODULE_DESCRIPTION("MediaTek UFS Host Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) module_platform_driver(ufs_mtk_pltform);