^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Universal Flash Storage Host controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2011-2013 Samsung India Software Operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Santosh Yaraganavi <santosh.sy@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Vinayak Holikatti <h.vinayak@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #ifndef _UFSHCD_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define _UFSHCD_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/devfreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/keyslot-manager.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "unipro.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <scsi/scsi_dbg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <scsi/scsi_eh.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/android_kabi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include "ufs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "ufs_quirks.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "ufshci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
/* Driver name (used for logging and device naming) and driver version */
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct ufs_hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
/*
 * Device management command types, issued internally by the driver as
 * UPIUs (not through the SCSI midlayer).
 */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP = 0x0,		/* NOP OUT UPIU */
	DEV_CMD_TYPE_QUERY = 0x1,	/* Query Request UPIU */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
/*
 * enum ufs_event_type - event categories recorded by the driver's event
 * history. UFS_EVT_CNT is the number of categories and must stay last;
 * existing values must not be renumbered (they index per-event storage).
 */
enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	UFS_EVT_CNT,	/* keep last: number of event categories */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * struct uic_command - UIC command structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * @command: UIC command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * @argument1: UIC command argument 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * @argument2: UIC command argument 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * @argument3: UIC command argument 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * @cmd_active: Indicate if UIC command is outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * @done: UIC command completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct uic_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u32 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u32 argument1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) u32 argument2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) u32 argument3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int cmd_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct completion done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,		/* runtime suspend/resume */
	UFS_SYSTEM_PM,		/* system-wide suspend/resume */
	UFS_SHUTDOWN_PM,	/* shutdown/power-off path */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
/* Helpers to test which PM operation (enum ufs_pm_op) is in progress */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
/*
 * Host <-> Device UniPro Link state; the current state is kept in
 * hba->uic_link_state (see the ufshcd_is_link_* / ufshcd_set_link_*
 * accessors below).
 */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
/* Accessors for hba->uic_link_state: "is" tests, "set" records a new state */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

/* Accessors for the UFS device power mode, hba->curr_dev_pwr_mode */
#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings; each one names a
 * (device power mode, link state) pair.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_MAX	/* keep last: number of PM levels */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct ufs_pm_lvl_states {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) enum ufs_dev_pwr_mode dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) enum uic_link_state link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * struct ufshcd_lrb - local reference block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * @utr_descriptor_ptr: UTRD address of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * @ucd_req_ptr: UCD address of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * @ucd_rsp_ptr: Response UPIU address for this command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * @ucd_prdt_ptr: PRDT address of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @utrd_dma_addr: UTRD dma address for debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * @ucd_prdt_dma_addr: PRDT dma address for debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * @ucd_rsp_dma_addr: UPIU response dma address for debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * @ucd_req_dma_addr: UPIU request dma address for debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * @cmd: pointer to SCSI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * @sense_buffer: pointer to sense buffer address of the SCSI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * @sense_bufflen: Length of the sense buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * @scsi_status: SCSI status of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * @command_type: SCSI, UFS, Query.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * @task_tag: Task tag of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * @lun: LUN of the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * @issue_time_stamp: time stamp for debug purposes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * @compl_time_stamp: time stamp for statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * @data_unit_num: the data unit number for the first block for inline crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * @req_abort_skip: skip request abort task flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct ufshcd_lrb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct utp_transfer_req_desc *utr_descriptor_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct utp_upiu_req *ucd_req_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct utp_upiu_rsp *ucd_rsp_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct ufshcd_sg_entry *ucd_prdt_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) dma_addr_t utrd_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) dma_addr_t ucd_req_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) dma_addr_t ucd_rsp_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) dma_addr_t ucd_prdt_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) u8 *sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) unsigned int sense_bufflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) int scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) int command_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) int task_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) u8 lun; /* UPIU LUN id field is only 8-bit wide */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) bool intr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) ktime_t issue_time_stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) ktime_t compl_time_stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) #ifdef CONFIG_SCSI_UFS_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) int crypto_key_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) u64 data_unit_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) bool req_abort_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * struct ufs_query - holds relevant data structures for query request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * @request: request upiu and function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * @descriptor: buffer for sending/receiving descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * @response: response upiu and response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct ufs_query {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct ufs_query_req request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) u8 *descriptor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct ufs_query_res response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * struct ufs_dev_cmd - all assosiated fields with device management commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * @type: device management command type - Query, NOP OUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * @lock: lock to allow one command at a time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * @complete: internal commands completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct ufs_dev_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) enum dev_cmd_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) struct mutex lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) struct completion *complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) struct ufs_query query;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * struct ufs_clk_info - UFS clock related info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * @list: list headed by hba->clk_list_head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) * @clk: clock node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * @name: clock name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * @max_freq: maximum frequency supported by the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * @min_freq: min frequency that can be used for clock scaling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * @curr_freq: indicates the current frequency that it is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * @keep_link_active: indicates that the clk should not be disabled if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) link is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * @enabled: variable to check against multiple enable/disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) struct ufs_clk_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) u32 max_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) u32 min_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) u32 curr_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) bool keep_link_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) bool enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
/* Stage of a notification callback: before or after the operation */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct ufs_pa_layer_attr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) u32 gear_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u32 gear_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u32 lane_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) u32 lane_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) u32 pwr_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) u32 pwr_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) u32 hs_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) struct ufs_pwr_mode_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) bool is_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct ufs_pa_layer_attr info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * struct ufs_hba_variant_ops - variant specific callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * @name: variant name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * @init: called when the driver is initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * @exit: called to cleanup everything done in init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * @get_ufs_hci_version: called to get UFS HCI version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * @clk_scale_notify: notifies that clks are scaled up/down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * @setup_clocks: called before touching any of the controller registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * @setup_regulators: called before accessing the host controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * @hce_enable_notify: called before and after HCE enable bit is set to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * variant specific Uni-Pro initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * @link_startup_notify: called before and after Link startup is carried out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * to allow variant specific Uni-Pro initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * @pwr_change_notify: called before and after a power mode change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * is carried out to allow vendor spesific capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * @setup_xfer_req: called before any transfer request is issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * to set some things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * @setup_task_mgmt: called before any task management request is issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * to set some things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * @hibern8_notify: called around hibern8 enter/exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * @apply_dev_quirks: called to apply device specific quirks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * @suspend: called during host controller PM callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * @resume: called during host controller PM callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * @dbg_register_dump: used to dump controller debug information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * @phy_initialization: used to initialize phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * @device_reset: called to issue a reset pulse on the UFS device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * @program_key: program or evict an inline encryption key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * @event_notify: called to notify important events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) struct ufs_hba_variant_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) int (*init)(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) void (*exit)(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) u32 (*get_ufs_hci_version)(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) int (*clk_scale_notify)(struct ufs_hba *, bool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) enum ufs_notify_change_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) int (*setup_clocks)(struct ufs_hba *, bool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) enum ufs_notify_change_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) int (*setup_regulators)(struct ufs_hba *, bool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) int (*hce_enable_notify)(struct ufs_hba *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) enum ufs_notify_change_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) int (*link_startup_notify)(struct ufs_hba *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) enum ufs_notify_change_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) int (*pwr_change_notify)(struct ufs_hba *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) enum ufs_notify_change_status status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) struct ufs_pa_layer_attr *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) struct ufs_pa_layer_attr *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) void (*setup_xfer_req)(struct ufs_hba *, int, bool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) enum ufs_notify_change_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) int (*apply_dev_quirks)(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) void (*fixup_dev_quirks)(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) int (*resume)(struct ufs_hba *, enum ufs_pm_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) void (*dbg_register_dump)(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) int (*phy_initialization)(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) int (*device_reset)(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) void (*config_scaling_param)(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct devfreq_dev_profile *profile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) int (*program_key)(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) const union ufs_crypto_cfg_entry *cfg, int slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) void (*event_notify)(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) enum ufs_event_type evt, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) ANDROID_KABI_RESERVE(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) ANDROID_KABI_RESERVE(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) ANDROID_KABI_RESERVE(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating has been requested */
	REQ_CLKS_ON,	/* ungating has been requested */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * struct ufs_clk_gating - UFS clock gating related info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * @gate_work: worker to turn off clocks after some delay as specified in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * delay_ms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * @ungate_work: worker to turn on clocks that will be used in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * interrupt context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * @state: the current clocks state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * @delay_ms: gating delay in ms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * @is_suspended: clk gating is suspended when set to 1 which can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * during suspend/resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * @delay_attr: sysfs attribute to control delay_attr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * @enable_attr: sysfs attribute to enable/disable clock gating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * @is_enabled: Indicates the current status of clock gating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * @is_initialized: Indicates whether clock gating is initialized or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * @active_reqs: number of requests that are pending and should be waited for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * completion before gating clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct ufs_clk_gating {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct delayed_work gate_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct work_struct ungate_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) enum clk_gating_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) unsigned long delay_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) bool is_suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) struct device_attribute delay_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct device_attribute enable_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) bool is_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) bool is_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) int active_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct workqueue_struct *clk_gating_workq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct ufs_saved_pwr_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) struct ufs_pa_layer_attr info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) bool is_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * struct ufs_clk_scaling - UFS clock scaling related data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * @active_reqs: number of requests that are pending. If this is zero when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * devfreq ->target() function is called then schedule "suspend_work" to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * suspend devfreq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * @tot_busy_t: Total busy time in current polling window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * @window_start_t: Start time (in jiffies) of the current polling window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * @busy_start_t: Start time of current busy period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * @enable_attr: sysfs attribute to enable/disable clock scaling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * @saved_pwr_info: UFS power mode may also be changed during scaling and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) * one keeps track of previous power mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * @workq: workqueue to schedule devfreq suspend/resume work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * @suspend_work: worker to suspend devfreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * @resume_work: worker to resume devfreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * @min_gear: lowest HS gear to scale down to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * @is_enabled: tracks if scaling is currently enabled or not, controlled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) clkscale_enable sysfs node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * @is_allowed: tracks if scaling is currently allowed or not, used to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) clock scaling which is not invoked from devfreq governor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @is_initialized: Indicates whether clock scaling is initialized or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * @is_busy_started: tracks if busy period has started or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * @is_suspended: tracks if devfreq is suspended or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) struct ufs_clk_scaling {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) int active_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) unsigned long tot_busy_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) ktime_t window_start_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) ktime_t busy_start_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) struct device_attribute enable_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) struct ufs_saved_pwr_info saved_pwr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct workqueue_struct *workq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct work_struct suspend_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct work_struct resume_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) u32 min_gear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) bool is_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) bool is_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) bool is_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) bool is_busy_started;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) bool is_suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) #define UFS_EVENT_HIST_LENGTH 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * struct ufs_event_hist - keeps history of errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * @pos: index to indicate cyclic buffer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * @reg: cyclic buffer for registers value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * @tstamp: cyclic buffer for time stamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * @cnt: error counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct ufs_event_hist {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) u32 val[UFS_EVENT_HIST_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) unsigned long long cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * struct ufs_stats - keeps usage/err statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * @last_intr_status: record the last interrupt status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * @last_intr_ts: record the last interrupt timestamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * @hibern8_exit_cnt: Counter to keep track of number of exits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * reset this after link-startup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * Clear after the first successful command completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) struct ufs_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) u32 last_intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) ktime_t last_intr_ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) u32 hibern8_exit_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) ktime_t last_hibern8_exit_tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) struct ufs_event_hist event[UFS_EVT_CNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) enum ufshcd_quirks {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* Interrupt aggregation support is broken */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * delay before each dme command is required as the unipro
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * layer has shown instabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * If UFS host controller is having issue in processing LCC (Line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * Control Command) coming from device then enable this quirk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * When this quirk is enabled, host controller driver should disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * attribute of device to 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * The attribute PA_RXHSUNTERMCAP specifies whether or not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * inbound Link supports unterminated line in HS mode. Setting this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * attribute to 1 fixes moving to HS gear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * This quirk needs to be enabled if the host controller only allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * accessing the peer dme attributes in AUTO mode (FAST AUTO or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * SLOW AUTO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * This quirk needs to be enabled if the host controller doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * advertise the correct version in UFS_VER register. If this quirk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * is enabled, standard UFS host driver will call the vendor specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * ops (get_ufs_hci_version) to get the correct version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * Clear handling for transfer/task request list is just opposite.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * This quirk needs to be enabled if host controller doesn't allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * that the interrupt aggregation timer and counter are reset by s/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * This quirks needs to be enabled if host controller cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * enabled via HCE register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * This quirk needs to be enabled if the host controller regards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * resolution of the values of PRDTO and PRDTL in UTRD as byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * This quirk needs to be enabled if the host controller reports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * OCS FATAL ERROR with device error through sense data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * This quirk needs to be enabled if the host controller has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * auto-hibernate capability but it doesn't work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * This quirk needs to disable manual flush for write booster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * This quirk needs to disable unipro timeout values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * before power mode change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * This quirk allows only sg entries aligned with page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) * This quirk needs to be enabled if the host controller does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) * support UIC command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * This quirk needs to be enabled if the host controller cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) * support interface configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) UFSHCD_QUIRK_SKIP_INTERFACE_CONFIGURATION = 1 << 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * This quirk needs to be enabled if the host controller supports inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * encryption, but it needs to initialize the crypto capabilities in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * nonstandard way and/or it needs to override blk_ksm_ll_ops. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * enabled, the standard code won't initialize the blk_keyslot_manager;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * ufs_hba_variant_ops::init() must do it instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) UFSHCD_QUIRK_CUSTOM_KEYSLOT_MANAGER = 1 << 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * This quirk needs to be enabled if the host controller supports inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * encryption, but the CRYPTO_GENERAL_ENABLE bit is not implemented and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * breaks the HCE sequence if used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 21,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * This quirk needs to be enabled if the host controller requires that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * the PRDT be cleared after each encrypted request because encryption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * keys were stored in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 22,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) enum ufshcd_caps {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* Allow dynamic clk gating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) UFSHCD_CAP_CLK_GATING = 1 << 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /* Allow hiberb8 with clk gating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /* Allow dynamic clk scaling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) UFSHCD_CAP_CLK_SCALING = 1 << 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /* Allow auto bkops to enabled during runtime suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * This capability allows host controller driver to use the UFS HCI's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * interrupt aggregation capability.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * CAUTION: Enabling this might reduce overall UFS throughput.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) UFSHCD_CAP_INTR_AGGR = 1 << 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * This capability allows the device auto-bkops to be always enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * except during suspend (both runtime and suspend).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * Enabling this capability means that device will always be allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * to do background operation when it's active but it might degrade
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * the performance of ongoing read/write operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * This capability allows host controller driver to automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * enable runtime power management by itself instead of waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * for userspace to control the power management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * This capability allows the host controller driver to turn-on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * WriteBooster, if the underlying device supports it and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * provisioned to be used. This would increase the write performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) UFSHCD_CAP_WB_EN = 1 << 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * This capability allows the host controller driver to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * inline crypto engine, if it is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) UFSHCD_CAP_CRYPTO = 1 << 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * This capability allows the controller regulators to be put into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * lpm mode aggressively during clock gating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * This would increase power savings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct ufs_hba_variant_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct devfreq_dev_profile devfreq_profile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct devfreq_simple_ondemand_data ondemand_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) u16 hba_enable_delay_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) u32 wb_flush_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) #ifdef CONFIG_SCSI_UFS_HPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * struct ufshpb_dev_info - UFSHPB device related info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @num_lu: the number of user logical unit to check whether all lu finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * @rgn_size: device reported HPB region size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * @srgn_size: device reported HPB sub-region size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * @slave_conf_cnt: counter to check all lu finished initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * @hpb_disabled: flag to check if HPB is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * @is_legacy: flag to check HPB 1.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * @control_mode: either host or device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct ufshpb_dev_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int num_lu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) int rgn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) int srgn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) atomic_t slave_conf_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) bool hpb_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) u8 max_hpb_single_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) bool is_legacy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) u8 control_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct ufs_hba_monitor {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) unsigned long chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) unsigned long nr_sec_rw[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) ktime_t total_busy[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) unsigned long nr_req[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* latencies*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) ktime_t lat_sum[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) ktime_t lat_max[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ktime_t lat_min[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) u32 nr_queued[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ktime_t busy_start_ts[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) ktime_t enabled_ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) bool enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * struct ufs_hba - per adapter private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * @mmio_base: UFSHCI base register address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * @ucdl_base_addr: UFS Command Descriptor base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * @utrdl_base_addr: UTP Transfer Request Descriptor base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * @utmrdl_base_addr: UTP Task Management Descriptor base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * @ucdl_dma_addr: UFS Command Descriptor DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * @utrdl_dma_addr: UTRDL DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * @utmrdl_dma_addr: UTMRDL DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * @host: Scsi_Host instance of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * @dev: device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * @lrb: local reference block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * @outstanding_tasks: Bits representing outstanding task requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * @outstanding_reqs: Bits representing outstanding transfer requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * @capabilities: UFS Controller Capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * @nutrs: Transfer Request Queue depth supported by controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * @nutmrs: Task Management Queue depth supported by controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * @ufs_version: UFS Version to which controller complies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * @vops: pointer to variant specific operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * @priv: pointer to variant specific private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * @irq: Irq number of the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * @active_uic_cmd: handle of active UIC command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * @uic_cmd_mutex: mutex for uic command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * @tmf_tag_set: TMF tag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * @tmf_queue: Used to allocate TMF tags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * @pwr_done: completion for power mode change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * @ufshcd_state: UFSHCD states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * @eh_flags: Error handling flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * @intr_mask: Interrupt Mask Bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * @ee_ctrl_mask: Exception event control mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * @is_powered: flag to check if HBA is powered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @shutting_down: flag to check if shutdown has been invoked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @host_sem: semaphore used to serialize concurrent contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * @eh_wq: Workqueue that eh_work works on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * @eh_work: Worker to handle UFS errors that require s/w attention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * @eeh_work: Worker to handle exception events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * @errors: HBA errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * @uic_error: UFS interconnect layer error status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * @saved_err: sticky error mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * @saved_uic_err: sticky UIC error mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * @force_reset: flag to force eh_work perform a full reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * @force_pmc: flag to force a power mode change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * @silence_err_logs: flag to silence error logs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * @dev_cmd: ufs device management command information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * @last_dme_cmd_tstamp: time stamp of the last completed DME command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * @auto_bkops_enabled: to track whether bkops is enabled in device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * @vreg_info: UFS device voltage regulator information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @clk_list_head: UFS host controller clocks list node head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * @pwr_info: holds current power mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * @max_pwr_info: keeps the device max valid pwm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * @desc_size: descriptor sizes reported by device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * @urgent_bkops_lvl: keeps track of urgent bkops level for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * device is known or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * @scsi_block_reqs_cnt: reference counting for scsi block requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * @crypto_capabilities: Content of crypto capabilities register (0x100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * @crypto_cap_array: Array of crypto capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * @crypto_cfg_register: Start of the crypto cfg array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * @ksm: the keyslot manager tied to this hba
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct ufs_hba {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) void __iomem *mmio_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Virtual memory reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct utp_transfer_cmd_desc *ucdl_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct utp_transfer_req_desc *utrdl_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct utp_task_req_desc *utmrdl_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* DMA memory reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dma_addr_t ucdl_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dma_addr_t utrdl_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) dma_addr_t utmrdl_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct Scsi_Host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct request_queue *cmd_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * This field is to keep a reference to "scsi_device" corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * "UFS device" W-LU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct scsi_device *sdev_ufs_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct scsi_device *sdev_rpmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) enum ufs_dev_pwr_mode curr_dev_pwr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) enum uic_link_state uic_link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* Desired UFS power management level during runtime PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) enum ufs_pm_level rpm_lvl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Desired UFS power management level during system PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) enum ufs_pm_level spm_lvl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct device_attribute rpm_lvl_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct device_attribute spm_lvl_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) int pm_op_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Auto-Hibernate Idle Timer register value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u32 ahit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct ufshcd_lrb *lrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) unsigned long outstanding_tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned long outstanding_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) u32 capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int nutrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int nutmrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * This has been moved into struct ufs_hba_add_info because of the GKI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 reserved_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u32 ufs_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) const struct ufs_hba_variant_ops *vops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct ufs_hba_variant_params *vps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) void *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) size_t sg_entry_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) bool is_irq_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) enum ufs_ref_clk_freq dev_ref_clk_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) unsigned int quirks; /* Deviations from standard UFSHCI spec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Device deviations from standard UFS device spec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unsigned int dev_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct blk_mq_tag_set tmf_tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct request_queue *tmf_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * This has been moved into struct ufs_hba_add_info because of the GKI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct request **tmf_rqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct uic_command *active_uic_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct mutex uic_cmd_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct completion *uic_async_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) u32 ufshcd_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) u32 eh_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u32 intr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) u16 ee_ctrl_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) bool is_powered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bool shutting_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct semaphore host_sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Work Queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct workqueue_struct *eh_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct work_struct eh_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct work_struct eeh_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* HBA Errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u32 errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) u32 uic_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u32 saved_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u32 saved_uic_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct ufs_stats ufs_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) bool force_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) bool force_pmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) bool silence_err_logs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Device management request data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct ufs_dev_cmd dev_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ktime_t last_dme_cmd_tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Keeps information of the UFS device connected to this host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct ufs_dev_info dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) bool auto_bkops_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct ufs_vreg_info vreg_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct list_head clk_list_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) bool wlun_dev_clr_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* Number of requests aborts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int req_abort_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* Number of lanes available (1 or 2) for Rx/Tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) u32 lanes_per_direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct ufs_pa_layer_attr pwr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct ufs_pwr_mode_info max_pwr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct ufs_clk_gating clk_gating;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* Control to enable/disable host capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) u32 caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct devfreq *devfreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct ufs_clk_scaling clk_scaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) bool is_sys_suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) enum bkops_status urgent_bkops_lvl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bool is_urgent_bkops_lvl_checked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct rw_semaphore clk_scaling_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) unsigned char desc_size[QUERY_DESC_IDN_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) atomic_t scsi_block_reqs_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct device bsg_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct request_queue *bsg_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bool wb_buf_flush_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) bool wb_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct delayed_work rpm_dev_flush_recheck_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /* This has been moved into struct ufs_hba_add_info. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct ufshpb_dev_info ufshpb_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct ufs_hba_monitor monitor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #ifdef CONFIG_SCSI_UFS_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) union ufs_crypto_capabilities crypto_capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) union ufs_crypto_cap_entry *crypto_cap_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u32 crypto_cfg_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct blk_keyslot_manager ksm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct dentry *debugfs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ANDROID_KABI_RESERVE(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ANDROID_KABI_RESERVE(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ANDROID_KABI_RESERVE(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Returns true if clocks can be gated. Otherwise false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return hba->caps & UFSHCD_CAP_CLK_GATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return hba->caps & UFSHCD_CAP_CLK_SCALING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) #ifndef CONFIG_SCSI_UFS_DWC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return !!(ufshcd_is_link_hibern8(hba) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return hba->caps & UFSHCD_CAP_WB_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return !hba->shutting_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #define ufshcd_writel(hba, val, reg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) writel((val), (hba)->mmio_base + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) #define ufshcd_readl(hba, reg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) readl((hba)->mmio_base + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * ufshcd_rmwl - read modify write into a register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * @hba - per adapter instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * @mask - mask to apply on read value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * @val - actual value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * @reg - register address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) tmp = ufshcd_readl(hba, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tmp &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) tmp |= (val & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ufshcd_writel(hba, tmp, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int ufshcd_alloc_host(struct device *, struct ufs_hba **);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) void ufshcd_dealloc_host(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int ufshcd_hba_enable(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int ufshcd_link_recovery(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) int ufshcd_make_hba_operational(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) void ufshcd_remove(struct ufs_hba *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 val, unsigned long interval_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) unsigned long timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) void ufshcd_hba_stop(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static inline void check_upiu_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * ufshcd_set_variant - set variant specific data to the hba
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * @hba - per adapter instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * @variant - pointer to variant specific data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) BUG_ON(!hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) hba->priv = variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * ufshcd_get_variant - get variant specific data from the hba
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * @hba - per adapter instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static inline void *ufshcd_get_variant(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) BUG_ON(!hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return hba->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return hba->dev_info.wb_dedicated_lu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) extern int ufshcd_runtime_resume(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) extern int ufshcd_runtime_idle(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) extern int ufshcd_system_suspend(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) extern int ufshcd_system_resume(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) extern int ufshcd_shutdown(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u8 attr_set, u32 mib_val, u8 peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) u32 *mib_val, u8 peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct ufs_pa_layer_attr *desired_pwr_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* UIC command interfaces for DME primitives */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) #define DME_LOCAL 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #define DME_PEER 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) #define ATTR_SET_NOR 0 /* NORMAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #define ATTR_SET_ST 1 /* STATIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u32 mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) mib_val, DME_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) u32 mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mib_val, DME_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u32 mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) mib_val, DME_PEER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u32 mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) mib_val, DME_PEER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static inline int ufshcd_dme_get(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) u32 attr_sel, u32 *mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) u32 attr_sel, u32 *mib_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return (pwr_info->pwr_rx == FAST_MODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pwr_info->pwr_rx == FASTAUTO_MODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) (pwr_info->pwr_tx == FAST_MODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) pwr_info->pwr_tx == FASTAUTO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* Expose Query-Request API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) enum query_opcode opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) enum desc_idn idn, u8 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) u8 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) u8 *desc_buf, int *buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int ufshcd_read_desc_param(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) enum desc_idn desc_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int desc_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) u8 param_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u8 *param_read_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) u8 param_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) int ufshcd_query_attr_retry(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) u32 *attr_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) enum flag_idn idn, u8 index, bool *flag_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int ufshcd_query_flag_retry(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int ufshcd_bkops_ctrl(struct ufs_hba *hba, enum bkops_status status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #define SD_ASCII_STD true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #define SD_RAW false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) u8 **buf, bool ascii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) int ufshcd_hold(struct ufs_hba *hba, bool async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void ufshcd_release(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int *desc_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct utp_upiu_req *req_upiu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct utp_upiu_req *rsp_upiu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int msgcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) u8 *desc_buff, int *buff_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) enum query_opcode desc_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /* Wrapper functions for safely calling variant operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (hba->vops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return hba->vops->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static inline int ufshcd_vops_init(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (hba->vops && hba->vops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return hba->vops->init(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static inline void ufshcd_vops_exit(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (hba->vops && hba->vops->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return hba->vops->exit(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (hba->vops && hba->vops->get_ufs_hci_version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return hba->vops->get_ufs_hci_version(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return ufshcd_readl(hba, REG_UFS_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return (hba->ufs_version >= ufshci_version(3, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) bool up, enum ufs_notify_change_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (hba->vops && hba->vops->clk_scale_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return hba->vops->clk_scale_notify(hba, up, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) enum ufs_event_type evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (hba->vops && hba->vops->event_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) hba->vops->event_notify(hba, evt, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) enum ufs_notify_change_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (hba->vops && hba->vops->setup_clocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return hba->vops->setup_clocks(hba, on, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (hba->vops && hba->vops->setup_regulators)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return hba->vops->setup_regulators(hba, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) bool status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (hba->vops && hba->vops->hce_enable_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return hba->vops->hce_enable_notify(hba, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) bool status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (hba->vops && hba->vops->link_startup_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return hba->vops->link_startup_notify(hba, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) bool status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct ufs_pa_layer_attr *dev_max_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct ufs_pa_layer_attr *dev_req_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (hba->vops && hba->vops->pwr_change_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return hba->vops->pwr_change_notify(hba, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev_max_params, dev_req_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) bool is_scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (hba->vops && hba->vops->setup_xfer_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) spin_lock_irqsave(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) spin_unlock_irqrestore(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) int tag, u8 tm_function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (hba->vops && hba->vops->setup_task_mgmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return hba->vops->setup_task_mgmt(hba, tag, tm_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) enum uic_cmd_dme cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) enum ufs_notify_change_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (hba->vops && hba->vops->hibern8_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return hba->vops->hibern8_notify(hba, cmd, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (hba->vops && hba->vops->apply_dev_quirks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return hba->vops->apply_dev_quirks(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (hba->vops && hba->vops->fixup_dev_quirks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) hba->vops->fixup_dev_quirks(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (hba->vops && hba->vops->suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return hba->vops->suspend(hba, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (hba->vops && hba->vops->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return hba->vops->resume(hba, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (hba->vops && hba->vops->dbg_register_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) hba->vops->dbg_register_dump(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (hba->vops && hba->vops->device_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int err = hba->vops->device_reset(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ufshcd_set_ufs_dev_active(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (ufshcd_is_wb_allowed(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) hba->wb_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) hba->wb_buf_flush_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (err != -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct devfreq_dev_profile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) *profile, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (hba->vops && hba->vops->config_scaling_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) hba->vops->config_scaling_param(hba, profile, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * @scsi_lun: scsi LUN id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * Returns UPIU LUN id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (scsi_is_wlun(scsi_lun))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) | UFS_UPIU_WLUN_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) const char *prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) #endif /* End of Header */