Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

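The file below is this tree's copy of the UFS Host Performance Booster (HPB) driver, ufshpb.c, with the git-blame annotations stripped. HPB caches the device's logical-to-physical (L2P) map in host memory so that read commands can carry the physical address hint to the device.
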
// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Flash Storage Host Performance Booster
 *
 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *	Yongmyung Lee <ymhungry.lee@samsung.com>
 *	Jinyoung Choi <j-young.choi@samsung.com>
 */

#include <asm/unaligned.h>
#include <linux/async.h>

#include "ufshcd.h"
#include "ufshcd-add-info.h"
#include "ufshpb.h"
#include "../sd.h"

#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define READ_TO_MS 1000
#define READ_TO_EXPIRIES 100
#define POLLING_INTERVAL_MS 200
#define THROTTLE_MAP_REQ_DEFAULT 1

/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
static mempool_t *ufshpb_page_pool;
/* A cache size of 2MB can cache ppn in the 1GB range. */
static unsigned int ufshpb_host_map_kbytes = 2048;
static int tot_active_srgn_pages;

static struct workqueue_struct *ufshpb_wq;

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
				      int srgn_idx);

static inline struct ufshpb_dev_info *ufs_hba_to_hpb(struct ufs_hba *hba)
{
	return &ufs_hba_add_info(hba)->hpb_dev;
}

bool ufshpb_is_allowed(struct ufs_hba *hba)
{
	return !(ufs_hba_to_hpb(hba)->hpb_disabled);
}

/* HPB version 1.0 is called the legacy version. */
bool ufshpb_is_legacy(struct ufs_hba *hba)
{
	return ufs_hba_to_hpb(hba)->is_legacy;
}

static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
{
	return sdev->hostdata;
}

static int ufshpb_get_state(struct ufshpb_lu *hpb)
{
	return atomic_read(&hpb->hpb_state);
}

static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
{
	atomic_set(&hpb->hpb_state, state);
}

static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
				struct ufshpb_subregion *srgn)
{
	return rgn->rgn_state != HPB_RGN_INACTIVE &&
		srgn->srgn_state == HPB_SRGN_VALID;
}

static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
{
	return req_op(cmd->request) == REQ_OP_READ;
}

static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
{
	return op_is_write(req_op(cmd->request)) ||
	       op_is_discard(req_op(cmd->request));
}

static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
{
	return transfer_len <= hpb->pre_req_max_tr_len;
}

/*
 * By default, the WRITE_BUFFER command in this driver supports transfer
 * lengths of 36KB (len=9) to 1MB (len=256). The transfer_len range can be
 * changed through sysfs.
 */
static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
{
	return len > hpb->pre_req_min_tr_len &&
	       len <= hpb->pre_req_max_tr_len;
}

static bool ufshpb_is_general_lun(int lun)
{
	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
}

static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
{
	if (hpb->lu_pinned_end != PINNED_NOT_SET &&
	    rgn_idx >= hpb->lu_pinned_start &&
	    rgn_idx <= hpb->lu_pinned_end)
		return true;

	return false;
}

static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
{
	bool ret = false;
	unsigned long flags;

	if (ufshpb_get_state(hpb) != HPB_PRESENT)
		return;

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
		ret = true;
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

	if (ret)
		queue_work(ufshpb_wq, &hpb->map_work);
}

static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
				    struct ufshcd_lrb *lrbp,
				    struct utp_hpb_rsp *rsp_field)
{
	/* Check HPB_UPDATE_ALERT */
	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
		return false;

	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
	    rsp_field->desc_type != DEV_DES_TYPE ||
	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
	    rsp_field->hpb_op == HPB_RSP_NONE ||
	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
		return false;

	if (!ufshpb_is_general_lun(rsp_field->lun)) {
		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
			 lrbp->lun);
		return false;
	}

	return true;
}

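/*
 * Walk the sub-regions covering @cnt entries starting at (@rgn_idx,
 * @srgn_idx, @srgn_offset). For writes and discards (@set_dirty) the
 * covered ppn_dirty bits are set; in host control mode, reads instead
 * bump the read counters and queue the sub-region for activation once
 * its read count reaches the activation threshold.
 */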
static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
			       int srgn_offset, int cnt, bool set_dirty)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn, *prev_srgn = NULL;
	int set_bit_len;
	int bitmap_len;
	unsigned long flags;

next_srgn:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if ((srgn_offset + cnt) > bitmap_len)
		set_bit_len = bitmap_len - srgn_offset;
	else
		set_bit_len = cnt;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
		if (set_dirty) {
			if (srgn->srgn_state == HPB_SRGN_VALID)
				bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
					   set_bit_len);
		} else if (hpb->is_hcm) {
			/* rewind the read timer for LRU regions */
			rgn->read_timeout = ktime_add_ms(ktime_get(),
					rgn->hpb->params.read_timeout_ms);
			rgn->read_timeout_expiries =
				rgn->hpb->params.read_timeout_expiries;
		}
	}
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	if (hpb->is_hcm && prev_srgn != srgn) {
		bool activate = false;

		spin_lock(&rgn->rgn_lock);
		if (set_dirty) {
			rgn->reads -= srgn->reads;
			srgn->reads = 0;
			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
		} else {
			srgn->reads++;
			rgn->reads++;
			if (srgn->reads == hpb->params.activation_thld)
				activate = true;
		}
		spin_unlock(&rgn->rgn_lock);

		if (activate ||
		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
				"activate region %d-%d\n", rgn_idx, srgn_idx);
		}

		prev_srgn = srgn;
	}

	srgn_offset = 0;
	if (++srgn_idx == hpb->srgns_per_rgn) {
		srgn_idx = 0;
		rgn_idx++;
	}

	cnt -= set_bit_len;
	if (cnt > 0)
		goto next_srgn;
}

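/*
 * Return true if the HPB entries covering @cnt entries from the given
 * position cannot be used for an HPB READ: the region is inactive, the
 * sub-region is not valid, the map context is missing, or any covered
 * ppn_dirty bit is set.
 */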
static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
				  int srgn_idx, int srgn_offset, int cnt)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	int bitmap_len;
	int bit_len;

next_srgn:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if (!ufshpb_is_valid_srgn(rgn, srgn))
		return true;

	/*
	 * If the region state is active, mctx must be allocated.
	 * In this case, check whether the region was evicted or
	 * the mctx allocation failed.
	 */
	if (unlikely(!srgn->mctx)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);
		return true;
	}

	if ((srgn_offset + cnt) > bitmap_len)
		bit_len = bitmap_len - srgn_offset;
	else
		bit_len = cnt;

	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
			  srgn_offset) < bit_len + srgn_offset)
		return true;

	srgn_offset = 0;
	if (++srgn_idx == hpb->srgns_per_rgn) {
		srgn_idx = 0;
		rgn_idx++;
	}

	cnt -= bit_len;
	if (cnt > 0)
		goto next_srgn;

	return false;
}

static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
{
	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
}

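/*
 * Copy up to @len L2P entries, starting at entry @pos, from the map
 * context pages into @ppn_buf. The copy never crosses a page boundary;
 * the number of entries actually copied is returned, or -ENOMEM if the
 * backing page is missing.
 */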
static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
				     struct ufshpb_map_ctx *mctx, int pos,
				     int len, __be64 *ppn_buf)
{
	struct page *page;
	int index, offset;
	int copied;

	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);

	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
		copied = len;
	else
		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;

	page = mctx->m_page[index];
	if (unlikely(!page)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"error. cannot find page in mctx\n");
		return -ENOMEM;
	}

	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
	       copied * HPB_ENTRY_SIZE);

	return copied;
}

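/*
 * Translate @lpn into its region index, sub-region index and offset
 * within the sub-region, using the precomputed shift/mask values.
 */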
static void
ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
			int *srgn_idx, int *offset)
{
	int rgn_offset;

	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
	rgn_offset = lpn & hpb->entries_per_rgn_mask;
	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
	*offset = rgn_offset & hpb->entries_per_srgn_mask;
}

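/*
 * Rewrite the READ CDB in place as an HPB READ: opcode, the big-endian
 * ppn entry, the transfer length and the read id.
 */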
static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
			    struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
			    u8 transfer_len, int read_id)
{
	unsigned char *cdb = lrbp->cmd->cmnd;
	__be64 ppn_tmp = ppn;

	cdb[0] = UFSHPB_READ;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
		ppn_tmp = swab64(ppn);

	/* ppn value is stored as big-endian in the host memory */
	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
	cdb[14] = transfer_len;
	cdb[15] = read_id;

	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
}

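/* Build the HPB WRITE BUFFER CDB used as a pre-req to prefetch L2P entries. */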
static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
					    unsigned long lpn, unsigned int len,
					    int read_id)
{
	cdb[0] = UFSHPB_WRITE_BUFFER;
	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;

	put_unaligned_be32(lpn, &cdb[2]);
	cdb[6] = read_id;
	put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);

	cdb[9] = 0x00;	/* Control = 0x00 */
}

static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
{
	struct ufshpb_req *pre_req;

	if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
			 "pre_req throttle. inflight %d throttle %d",
			 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
		return NULL;
	}

	pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
					   struct ufshpb_req, list_req);
	if (!pre_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
		return NULL;
	}

	list_del_init(&pre_req->list_req);
	hpb->num_inflight_pre_req++;

	return pre_req;
}

static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
				      struct ufshpb_req *pre_req)
{
	pre_req->req = NULL;
	bio_reset(pre_req->bio);
	list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
	hpb->num_inflight_pre_req--;
}

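/*
 * Completion handler for a pre-req: log the sense data on error, free
 * the request and return the pre_req to the free list.
 */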
static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
{
	struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
	struct ufshpb_lu *hpb = pre_req->hpb;
	unsigned long flags;

	if (error) {
		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
		struct scsi_sense_hdr sshdr;

		dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
		scsi_command_normalize_sense(cmd, &sshdr);
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"code %x sense_key %x asc %x ascq %x",
			sshdr.response_code,
			sshdr.sense_key, sshdr.asc, sshdr.ascq);
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"byte4 %x byte5 %x byte6 %x additional_len %x",
			sshdr.byte4, sshdr.byte5,
			sshdr.byte6, sshdr.additional_length);
	}

	blk_mq_free_request(req);
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_put_pre_req(pre_req->hpb, pre_req);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
}

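/*
 * Fill @page with the L2P entries backing the pre-req's LPN range. The
 * walk is done under rgn_state_lock and fails with -ENOMEM if any
 * covered sub-region is invalid or has no map context.
 */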
static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
{
	struct ufshpb_lu *hpb = pre_req->hpb;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	__be64 *addr;
	int offset = 0;
	int copied;
	unsigned long lpn = pre_req->wb.lpn;
	int rgn_idx, srgn_idx, srgn_offset;
	unsigned long flags;

	addr = page_address(page);
	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

next_offset:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (!ufshpb_is_valid_srgn(rgn, srgn))
		goto mctx_error;

	if (!srgn->mctx)
		goto mctx_error;

	copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
					   pre_req->wb.len - offset,
					   &addr[offset]);

	if (copied < 0)
		goto mctx_error;

	offset += copied;
	srgn_offset += copied;

	if (srgn_offset == hpb->entries_per_srgn) {
		srgn_offset = 0;

		if (++srgn_idx == hpb->srgns_per_rgn) {
			srgn_idx = 0;
			rgn_idx++;
		}
	}

	if (offset < pre_req->wb.len)
		goto next_offset;

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	return 0;
mctx_error:
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	return -ENOMEM;
}

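/* Fill the pre-req page with L2P entries and attach it to the pre-req bio. */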
static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
				       struct request_queue *q,
				       struct ufshpb_req *pre_req)
{
	struct page *page = pre_req->wb.m_page;
	struct bio *bio = pre_req->bio;
	int entries_bytes, ret;

	if (!page)
		return -ENOMEM;

	if (ufshpb_prep_entry(pre_req, page))
		return -ENOMEM;

	entries_bytes = pre_req->wb.len * sizeof(__be64);

	ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
	if (ret != entries_bytes) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"bio_add_pc_page fail: %d", ret);
		return -ENOMEM;
	}
	return 0;
}

static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
{
	if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
		hpb->cur_read_id = 1;
	return hpb->cur_read_id;
}

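/*
 * Set up the block-layer and SCSI request for @pre_req and insert the
 * resulting WRITE BUFFER command into the request queue.
 */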
static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
				  struct ufshpb_req *pre_req, int read_id)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;
	struct request *req;
	struct scsi_request *rq;
	struct bio *bio = pre_req->bio;

	pre_req->hpb = hpb;
	pre_req->wb.lpn = sectors_to_logical(cmd->device,
					     blk_rq_pos(cmd->request));
	pre_req->wb.len = sectors_to_logical(cmd->device,
					     blk_rq_sectors(cmd->request));
	if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
		return -ENOMEM;

	req = pre_req->req;

	/* 1. request setup */
	blk_rq_append_bio(req, &bio);
	req->rq_disk = NULL;
	req->end_io_data = (void *)pre_req;
	req->end_io = ufshpb_pre_req_compl_fn;

	/* 2. scsi_request setup */
	rq = scsi_req(req);
	rq->retries = 1;

	ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
				 read_id);
	rq->cmd_len = scsi_command_size(rq->cmd);

	if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
		return -EAGAIN;

	hpb->stats.pre_req_cnt++;

	return 0;
}

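/*
 * Allocate a block-layer request and a pre_req slot, then issue the
 * WRITE BUFFER pre-req; returns -EAGAIN if either is unavailable.
 */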
static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
				int *read_id)
{
	struct ufshpb_req *pre_req;
	struct request *req = NULL;
	unsigned long flags;
	int _read_id;
	int ret = 0;

	req = blk_get_request(cmd->device->request_queue,
			      REQ_OP_SCSI_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return -EAGAIN;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	pre_req = ufshpb_get_pre_req(hpb);
	if (!pre_req) {
		ret = -EAGAIN;
		goto unlock_out;
	}
	_read_id = ufshpb_get_read_id(hpb);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	pre_req->req = req;

	ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
	if (ret)
		goto free_pre_req;

	*read_id = _read_id;

	return ret;
free_pre_req:
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_put_pre_req(hpb, pre_req);
unlock_out:
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	blk_put_request(req);
	return ret;
}

/*
 * Set up an HPB READ command using host-side L2P map data.
 */
int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshpb_lu *hpb;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	struct scsi_cmnd *cmd = lrbp->cmd;
	u32 lpn;
	__be64 ppn;
	unsigned long flags;
	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
	int read_id = 0;
	int err = 0;

	hpb = ufshpb_get_hpb_data(cmd->device);
	if (!hpb)
		return -ENODEV;

	if (ufshpb_get_state(hpb) == HPB_INIT)
		return -ENODEV;

	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT", __func__);
		return -ENODEV;
	}

	if (blk_rq_is_scsi(cmd->request) ||
	    (!ufshpb_is_write_or_discard(cmd) &&
	     !ufshpb_is_read_cmd(cmd)))
		return 0;

	transfer_len = sectors_to_logical(cmd->device,
					  blk_rq_sectors(cmd->request));
	if (unlikely(!transfer_len))
		return 0;

	lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	/* If the command is WRITE or DISCARD, mark the bitmap dirty */
	if (ufshpb_is_write_or_discard(cmd)) {
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, true);
		return 0;
	}

	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
		return 0;

	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);

	if (hpb->is_hcm) {
		/*
		 * In host control mode, reads are the main source for
		 * activation trials.
		 */
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, false);

		/* keep those counters normalized */
		if (rgn->reads > hpb->entries_per_srgn)
			schedule_work(&hpb->ufshpb_normalization_work);
	}

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len)) {
		hpb->stats.miss_cnt++;
		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
		return 0;
	}

	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	if (unlikely(err < 0)) {
		/*
		 * In this case, the region state is active,
		 * but the ppn table is not allocated.
		 * The ppn table must always be allocated while the
		 * region is in the active state.
		 */
		dev_err(hba->dev, "get ppn failed. err %d\n", err);
		return err;
	}

	if (!ufshpb_is_legacy(hba) &&
	    ufshpb_is_required_wb(hpb, transfer_len)) {
		err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
		if (err) {
			unsigned long timeout;

			timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
				  hpb->params.requeue_timeout_ms);

			if (time_before(jiffies, timeout))
				return -EAGAIN;

			hpb->stats.miss_cnt++;
			return 0;
		}
	}

	ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
				    read_id);

	hpb->stats.hit_cnt++;
	return 0;
}

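/*
 * Allocate an ufshpb_req and its backing block-layer request for
 * @rgn_idx; unless @atomic, retry up to HPB_MAP_REQ_RETRIES times when
 * the queue has no free tags.
 */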
static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
					 int rgn_idx, enum req_opf dir,
					 bool atomic)
{
	struct ufshpb_req *rq;
	struct request *req;
	int retries = HPB_MAP_REQ_RETRIES;

	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
	if (!rq)
		return NULL;

retry:
	req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
			      BLK_MQ_REQ_NOWAIT);

	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
		usleep_range(3000, 3100);
		goto retry;
	}

	if (IS_ERR(req))
		goto free_rq;

	rq->hpb = hpb;
	rq->req = req;
	rq->rb.rgn_idx = rgn_idx;

	return rq;

free_rq:
	kmem_cache_free(hpb->map_req_cache, rq);
	return NULL;
}

static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
	blk_put_request(rq->req);
	kmem_cache_free(hpb->map_req_cache, rq);
}

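/*
 * Allocate a map request used to read @srgn's L2P entries from the
 * device; in host control mode the number of in-flight map requests is
 * throttled.
 */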
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 					     struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	struct ufshpb_req *map_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (hpb->is_hcm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			 "map_req throttle. inflight %d throttle %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			 hpb->num_inflight_map_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			 hpb->params.inflight_map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (!map_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (!bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		ufshpb_put_req(hpb, map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	map_req->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	map_req->rb.srgn_idx = srgn->srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	map_req->rb.mctx = srgn->mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	spin_lock_irqsave(&hpb->param_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	hpb->num_inflight_map_req++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	spin_unlock_irqrestore(&hpb->param_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	return map_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			       struct ufshpb_req *map_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	bio_put(map_req->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	ufshpb_put_req(hpb, map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	spin_lock_irqsave(&hpb->param_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	hpb->num_inflight_map_req--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	spin_unlock_irqrestore(&hpb->param_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
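/*
 * Reset the subregion's ppn_dirty bitmap and clear the owning region's
 * dirty flag; used once fresh map data for the subregion has been read.
 */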
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				     struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	u32 num_entries = hpb->entries_per_srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (!srgn->mctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			"no mctx in region %d subregion %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			srgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (unlikely(srgn->is_last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		num_entries = hpb->last_srgn_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
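/*
 * Queue a subregion for activation: take the owning region off the
 * inactivation list and put the subregion on hpb->lh_act_srgn, where
 * map_work will pick it up. Caller must hold hpb->rsp_list_lock.
 */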
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 				      int srgn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	rgn = hpb->rgn_tbl + rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	srgn = rgn->srgn_tbl + srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	list_del_init(&rgn->list_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (list_empty(&srgn->list_act_srgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	hpb->stats.rb_active_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	rgn = hpb->rgn_tbl + rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	for_each_sub_region(rgn, srgn_idx, srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		list_del_init(&srgn->list_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (list_empty(&rgn->list_inact_rgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	hpb->stats.rb_inactive_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 				      struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 * If the subregion has no mctx after the HPB_READ_BUFFER I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	 * has completed, the region to which the subregion belongs was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	 * evicted while the I/O was in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * Ensure a region is not evicted while its I/O is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (!srgn->mctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			"no mctx in region %d subregion %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			srgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		srgn->srgn_state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			"region %d subregion %d evicted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			srgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		srgn->srgn_state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	srgn->srgn_state = HPB_SRGN_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
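/* Completion of an HPB WRITE BUFFER (unmap) request: just release it. */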
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	ufshpb_put_req(umap_req->hpb, umap_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
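/*
 * Completion of an HPB READ BUFFER (map) request: the subregion's map data
 * has been loaded, so clear its dirty bits, mark it valid (unless its
 * region was evicted in the meantime) and release the map request.
 */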
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct ufshpb_lu *hpb = map_req->hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		map_req->rb.srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ufshpb_clear_dirty_bitmap(hpb, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	ufshpb_activate_subregion(hpb, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	ufshpb_put_map_req(map_req->hpb, map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
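/*
 * Build an HPB WRITE BUFFER CDB. With a region supplied, the buffer ID
 * requests inactivation of that single region (region index in bytes 2-3,
 * big-endian); with rgn == NULL it requests inactivation of all regions.
 */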
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	cdb[0] = UFSHPB_WRITE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			  UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	cdb[9] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
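/*
 * Build an HPB READ BUFFER CDB: region index in bytes 2-3, subregion index
 * in bytes 4-5 (both big-endian), and the allocation length for the
 * subregion's map data in bytes 6-8.
 */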
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				    int srgn_idx, int srgn_mem_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	cdb[0] = UFSHPB_READ_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	cdb[1] = UFSHPB_READ_BUFFER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	put_unaligned_be16(rgn_idx, &cdb[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	put_unaligned_be16(srgn_idx, &cdb[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	put_unaligned_be24(srgn_mem_size, &cdb[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	cdb[9] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				   struct ufshpb_req *umap_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				   struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct scsi_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	req = umap_req->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	req->timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	req->end_io_data = (void *)umap_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	rq = scsi_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	ufshpb_set_unmap_cmd(rq->cmd, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	hpb->stats.umap_req_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
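/*
 * Attach the subregion's map pages to the request's bio, build the HPB
 * READ BUFFER CDB (with a reduced length for a trailing subregion) and
 * issue the request asynchronously.
 */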
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				  struct ufshpb_req *map_req, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct scsi_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	int mem_size = hpb->srgn_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	q = hpb->sdev_ufs_lu->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	for (i = 0; i < hpb->pages_per_srgn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 				      PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		if (ret != PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				   "bio_add_pc_page fail %d - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	req = map_req->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	blk_rq_append_bio(req, &map_req->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	req->end_io_data = map_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	rq = scsi_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (unlikely(last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				map_req->rb.srgn_idx, mem_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	hpb->stats.map_req_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
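/*
 * Allocate a map context: the mctx itself, its m_page array, the ppn_dirty
 * bitmap, and one zeroed page per map page, unwinding the allocations in
 * reverse order on failure.
 */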
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 						 bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct ufshpb_map_ctx *mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	u32 num_entries = hpb->entries_per_srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (!mctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (!mctx->m_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		goto release_mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (unlikely(last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		num_entries = hpb->last_srgn_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (!mctx->ppn_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		goto release_m_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	for (i = 0; i < hpb->pages_per_srgn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (!mctx->m_page[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				mempool_free(mctx->m_page[j], ufshpb_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			goto release_ppn_dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		clear_page(page_address(mctx->m_page[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	return mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) release_ppn_dirty:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	bitmap_free(mctx->ppn_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) release_m_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) release_mctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	mempool_free(mctx, ufshpb_mctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			       struct ufshpb_map_ctx *mctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	for (i = 0; i < hpb->pages_per_srgn; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		mempool_free(mctx->m_page[i], ufshpb_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	bitmap_free(mctx->ppn_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	mempool_free(mctx, ufshpb_mctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
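/*
 * Return an error if any subregion of the region still has an HPB
 * READ BUFFER in flight; such a region must not be evicted.
 */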
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 					  struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	for_each_sub_region(rgn, srgn_idx, srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		if (srgn->srgn_state == HPB_SRGN_ISSUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
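/*
 * Periodic read-timeout worker (host control mode): a region whose read
 * timeout has expired is either re-armed with a new timeout window or,
 * once dirty or out of expiries, queued for inactivation.
 */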
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void ufshpb_read_to_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 					     ufshpb_read_to_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct victim_select_info *lru_info = &hpb->lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct ufshpb_region *rgn, *next_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	unsigned int poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	LIST_HEAD(expired_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				 list_lru_rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		if (timedout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			rgn->read_timeout_expiries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			if (is_rgn_dirty(rgn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			    rgn->read_timeout_expiries == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				list_add(&rgn->list_expired_rgn, &expired_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				rgn->read_timeout = ktime_add_ms(ktime_get(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 						hpb->params.read_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				 list_expired_rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		list_del_init(&rgn->list_expired_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	ufshpb_kick_map_work(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	poll = hpb->params.timeout_polling_interval_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	schedule_delayed_work(&hpb->ufshpb_read_to_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			      msecs_to_jiffies(poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
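/*
 * Mark a region active and append it to the LRU list; in host control
 * mode, also arm its read timeout.
 */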
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	rgn->rgn_state = HPB_RGN_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	atomic_inc(&lru_info->active_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (rgn->hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		rgn->read_timeout =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			ktime_add_ms(ktime_get(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				     rgn->hpb->params.read_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		rgn->read_timeout_expiries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			rgn->hpb->params.read_timeout_expiries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
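/*
 * Pick an eviction victim: the least recently used region with no map
 * request in flight and, in host control mode, few enough reads to be
 * allowed to leave the active set.
 */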
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	struct victim_select_info *lru_info = &hpb->lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	struct ufshpb_region *rgn, *victim_rgn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (!rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				"%s: no region allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		 * In host control mode, verify that the region leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		 * the active set has no more reads than the exit threshold.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		if (hpb->is_hcm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		    rgn->reads > hpb->params.eviction_thld_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		victim_rgn = rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	return victim_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				    struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	list_del_init(&rgn->list_lru_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	rgn->rgn_state = HPB_RGN_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	atomic_dec(&lru_info->active_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
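/* Release a subregion's map context and return it to the UNUSED state. */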
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 					  struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		ufshpb_put_map_ctx(hpb, srgn->mctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		srgn->srgn_state = HPB_SRGN_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		srgn->mctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
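/*
 * Issue an HPB WRITE BUFFER that asks the device to inactivate either a
 * single region (rgn != NULL) or all regions.
 */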
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				 struct ufshpb_region *rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				 bool atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	struct ufshpb_req *umap_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (!umap_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	ufshpb_execute_umap_req(hpb, umap_req, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 					struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	return ufshpb_issue_umap_req(hpb, rgn, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return ufshpb_issue_umap_req(hpb, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
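/*
 * Drop a region from the LRU list and release the map contexts of all its
 * subregions. Caller must hold hpb->rgn_state_lock.
 */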
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 				 struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	struct victim_select_info *lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	lru_info = &hpb->lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	ufshpb_cleanup_lru_info(lru_info, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	for_each_sub_region(rgn, srgn_idx, srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		ufshpb_purge_active_subregion(hpb, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (rgn->rgn_state == HPB_RGN_PINNED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			 "pinned region cannot drop-out. region %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			 rgn->rgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (!list_empty(&rgn->list_lru_rgn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			ret = ufshpb_issue_umap_single_req(hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		__ufshpb_evict_region(hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				struct ufshpb_region *rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct ufshpb_req *map_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	int err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	bool alloc_required = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			   "%s: ufshpb state is not PRESENT\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	    (srgn->srgn_state == HPB_SRGN_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (srgn->srgn_state == HPB_SRGN_UNUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		alloc_required = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * If the subregion is already in the ISSUED state, a device-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 * event (e.g. GC or wear-leveling) occurred and an HPB response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 * requesting a map reload arrived while an HPB_READ_BUFFER was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 * still outstanding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	 * In that case, once the outstanding HPB_READ_BUFFER finishes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	 * another HPB_READ_BUFFER is issued to obtain the latest map data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (srgn->srgn_state == HPB_SRGN_ISSUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	srgn->srgn_state = HPB_SRGN_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (alloc_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		if (!srgn->mctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			    "get map_ctx failed. region %d - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			    rgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			state = HPB_SRGN_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			goto change_srgn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	map_req = ufshpb_get_map_req(hpb, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	if (!map_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		goto change_srgn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			   "%s: issue map_req failed: %d, region %d - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			   __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		goto free_map_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) free_map_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	ufshpb_put_map_req(hpb, map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) change_srgn_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	srgn->srgn_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
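/*
 * Activate a region: refresh its LRU position if it is already active;
 * otherwise, evict a victim when the active set is full and then add the
 * region to the LRU list.
 */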
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct ufshpb_region *victim_rgn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	struct victim_select_info *lru_info = &hpb->lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 * If the region is already on the LRU list, it is already in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 * the active state; just move it to the most-recently-used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 * end of the LRU list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	if (!list_empty(&rgn->list_lru_rgn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		ufshpb_hit_lru_info(lru_info, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		if (atomic_read(&lru_info->active_cnt) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		    lru_info->max_lru_active_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			 * If the maximum number of active regions is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			 * reached, evict the least recently used region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			 * This can happen when the device responds late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			 * to earlier eviction information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			 * Evicting the LRU region is safe, because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			 * device can detect the eviction by the absence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			 * of HPB_READ commands for that region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			 * In host control mode, also verify that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			 * entering region has accumulated enough reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			if (hpb->is_hcm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			    rgn->reads < hpb->params.eviction_thld_enter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				ret = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			victim_rgn = ufshpb_victim_lru_info(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			if (!victim_rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				    "cannot get victim region %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				    hpb->is_hcm ? "" : "error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				"LRU full (%d), choose victim %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				atomic_read(&lru_info->active_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				victim_rgn->rgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 				spin_unlock_irqrestore(&hpb->rgn_state_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 						       flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				ret = ufshpb_issue_umap_single_req(hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 								victim_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				spin_lock_irqsave(&hpb->rgn_state_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 						  flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			__ufshpb_evict_region(hpb, victim_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		 * When a region is added to the lru_info list, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		 * guaranteed that all of its subregions have been assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		 * an mctx. If that fails, the mctx is requested again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		 * without the region being added to the lru_info list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		ufshpb_add_lru_info(lru_info, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 					 struct utp_hpb_rsp *rsp_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	int i, rgn_i, srgn_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	 * If the same region is reported as both active and inactive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	 * this region will be inactivated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	 * The device can detect this (the region stays inactive) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	 * will then respond with the proper active region information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		rgn_i =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		srgn_i =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		rgn = hpb->rgn_tbl + rgn_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		if (hpb->is_hcm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			 * In host control mode, subregion activation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			 * recommendations are honored only for active regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			 * Also ignore recommendations for dirty regions; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			 * host makes decisions concerning those by itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		spin_lock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		spin_unlock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		srgn = rgn->srgn_tbl + srgn_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		/* Block HPB_READ for this subregion until its map is refreshed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		spin_lock(&hpb->rgn_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		if (srgn->srgn_state == HPB_SRGN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			srgn->srgn_state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		spin_unlock(&hpb->rgn_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		 * In host control mode the device is not allowed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		 * inactivate regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			"inactivate(%d) region %d\n", i, rgn_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		spin_lock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		ufshpb_update_inactive_info(hpb, rgn_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		spin_unlock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		rgn = hpb->rgn_tbl + rgn_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		spin_lock(&hpb->rgn_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 				srgn = rgn->srgn_tbl + srgn_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 				if (srgn->srgn_state == HPB_SRGN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 					srgn->srgn_state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		spin_unlock(&hpb->rgn_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (ufshpb_get_state(hpb) == HPB_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		queue_work(ufshpb_wq, &hpb->map_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
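/*
 * The device lost its HPB state across a reset: mark every region on the
 * LRU list with RGN_FLAG_UPDATE so that its map data gets refreshed.
 */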
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	struct victim_select_info *lru_info = &hpb->lru_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * Parse the recommended active subregion information carried in the sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  * data field of a response UPIU with SAM_STAT_GOOD status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	int data_seg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (unlikely(lrbp->lun != rsp_field->lun)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		__shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			if (rsp_field->lun == hpb->lun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 				found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (ufshpb_get_state(hpb) == HPB_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 			   __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		& MASK_RSP_UPIU_DATA_SEG_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	/* To flush the remaining rsp_list, queue the map_work task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (!data_seg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		if (!ufshpb_is_general_lun(hpb->lun))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		ufshpb_kick_map_work(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	hpb->stats.rb_noti_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
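	/*
	 * The device can recommend region updates (activation/inactivation)
	 * or report that it lost its HPB state, e.g. across a power-mode
	 * transition; any other opcode is logged and ignored below.
	 */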
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	switch (rsp_field->hpb_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	case HPB_RSP_REQ_REGION_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		if (data_seg_len != DEV_DATA_SEG_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				 "%s: unexpected data segment length\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 				 __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		ufshpb_rsp_req_region_update(hpb, rsp_field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	case HPB_RSP_DEV_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 			 "UFS device lost HPB information during PM.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 			__shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 				struct ufshpb_lu *h = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 				if (h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 					ufshpb_dev_reset_handler(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			   "unsupported hpb_op: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			   rsp_field->hpb_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
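/*
 * Queue a subregion for (re)activation under rsp_list_lock.  A pending
 * inactivation of the whole region takes precedence; a subregion that is
 * already queued is moved to the head of the list for an earlier retry.
 */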
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 				   struct ufshpb_region *rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 				   struct ufshpb_subregion *srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (!list_empty(&rgn->list_inact_rgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	if (!list_empty(&srgn->list_act_srgn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
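/*
 * Defer a region whose eviction failed onto the caller's pending list, but
 * only if it is not already queued for inactivation and none of its
 * subregions are waiting to be activated.
 */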
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 					  struct ufshpb_region *rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 					  struct list_head *pending_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	if (!list_empty(&rgn->list_inact_rgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	for_each_sub_region(rgn, srgn_idx, srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		if (!list_empty(&srgn->list_act_srgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	list_add_tail(&rgn->list_inact_rgn, pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
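/*
 * Drain the active-subregion list.  rsp_list_lock is dropped while each
 * region is added and its map request is issued; a subregion that fails is
 * put back on the list so it can be retried on the next pass.
 */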
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 						struct ufshpb_subregion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 						list_act_srgn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		list_del_init(&srgn->list_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		rgn = hpb->rgn_tbl + srgn->rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		ret = ufshpb_add_region(hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			goto active_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			    "issue map_req failed. ret %d, region %d - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			    ret, rgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			goto active_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) active_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		   rgn->rgn_idx, srgn->srgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	ufshpb_add_active_list(hpb, rgn, srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
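/*
 * Drain the inactive-region list.  Regions that cannot be evicted yet are
 * parked on a local pending list and spliced back onto lh_inact_rgn once the
 * loop finishes.
 */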
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	LIST_HEAD(pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 					       struct ufshpb_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 					       list_inact_rgn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		list_del_init(&rgn->list_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		ret = ufshpb_evict_region(hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	list_splice(&pending_list, &hpb->lh_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
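/*
 * HCM work handler: decay every subregion's read counter by the configured
 * normalization factor and queue any active region whose decayed count
 * dropped to zero for inactivation.
 */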
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void ufshpb_normalization_work_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 					     ufshpb_normalization_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	int rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	u8 factor = hpb->params.normalization_factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		spin_lock(&rgn->rgn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		rgn->reads = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			srgn->reads >>= factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			rgn->reads += srgn->reads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		spin_unlock(&rgn->rgn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		/* if region is active but has no reads - inactivate it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		spin_lock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		spin_unlock(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
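/* Apply the queued inactivation requests first, then the activations. */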
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static void ufshpb_map_work_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			   "%s: ufshpb state is not PRESENT\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	ufshpb_run_inactive_region_list(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	ufshpb_run_active_subregion_list(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * This function does not need to hold any locks (rgn_state_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * rsp_list_lock, etc.) because it is only called during initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 					    struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 					    struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	int srgn_idx, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	for_each_sub_region(rgn, srgn_idx, srgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		srgn->srgn_state = HPB_SRGN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		if (!srgn->mctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			dev_err(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 				"alloc mctx for pinned region failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	rgn->rgn_state = HPB_RGN_PINNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	for (i = 0; i < srgn_idx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		srgn = rgn->srgn_tbl + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		ufshpb_put_map_ctx(hpb, srgn->mctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
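/*
 * Initialise a region's subregion entries; the very last subregion of the LU
 * is flagged when it maps fewer than a full subregion's worth of entries.
 */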
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 				      struct ufshpb_region *rgn, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	for_each_sub_region(rgn, srgn_idx, srgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		INIT_LIST_HEAD(&srgn->list_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		srgn->rgn_idx = rgn->rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		srgn->srgn_idx = srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		srgn->srgn_state = HPB_SRGN_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	if (unlikely(last && hpb->last_srgn_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		srgn->is_last = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 				      struct ufshpb_region *rgn, int srgn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	if (!rgn->srgn_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	rgn->srgn_cnt = srgn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
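/*
 * Derive the LU geometry from the descriptor values: rgn_size and srgn_size
 * are log2 multiples of HPB_RGN_SIZE_UNIT, and each HPB_ENTRY_SIZE-byte map
 * entry covers one HPB_ENTRY_BLOCK_SIZE-byte logical block, which yields the
 * entries-per-(sub)region counts and their power-of-two shift/mask helpers.
 */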
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 				     struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 				     struct ufshpb_dev_info *hpb_dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 				     struct ufshpb_lu_info *hpb_lu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	u32 entries_per_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	u64 rgn_mem_size, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	/* for pre_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if (ufshpb_is_legacy(hba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	hpb->cur_read_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		: PINNED_NOT_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	hpb->lru_info.max_lru_active_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			* HPB_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	tmp = rgn_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	do_div(tmp, HPB_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	entries_per_rgn = (u32)tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	hpb->entries_per_rgn_mask = entries_per_rgn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	tmp = rgn_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	do_div(tmp, hpb->srgn_mem_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	hpb->srgns_per_rgn = (int)tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 				entries_per_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 				(hpb->srgn_mem_size / HPB_ENTRY_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	hpb->last_srgn_entries = hpb_lu_info->num_blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		hpb->is_hcm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
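/*
 * Allocate the region table and each region's subregion table.  Pinned
 * regions get their map contexts allocated up front; all others start out
 * HPB_RGN_INACTIVE.  On failure, everything allocated so far is released.
 */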
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct ufshpb_region *rgn_table, *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	int rgn_idx, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (!rgn_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	hpb->rgn_tbl = rgn_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		int srgn_cnt = hpb->srgns_per_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		bool last_srgn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		rgn = rgn_table + rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		rgn->rgn_idx = rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		spin_lock_init(&rgn->rgn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		INIT_LIST_HEAD(&rgn->list_expired_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		if (rgn_idx == hpb->rgns_per_lu - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				    hpb->srgns_per_rgn) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			last_srgn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			goto release_srgn_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 				goto release_srgn_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			rgn->rgn_state = HPB_RGN_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		rgn->rgn_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		rgn->hpb = hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) release_srgn_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	for (i = 0; i < rgn_idx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		rgn = rgn_table + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		kvfree(rgn->srgn_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	kvfree(rgn_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 					 struct ufshpb_region *rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	int srgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	struct ufshpb_subregion *srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	for_each_sub_region(rgn, srgn_idx, srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			srgn->srgn_state = HPB_SRGN_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 			ufshpb_put_map_ctx(hpb, srgn->mctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	int rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		struct ufshpb_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		rgn = hpb->rgn_tbl + rgn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			rgn->rgn_state = HPB_RGN_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			ufshpb_destroy_subregion_tbl(hpb, rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		kvfree(rgn->srgn_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	kvfree(hpb->rgn_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /* SYSFS functions: HPB statistics */
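/*
 * Each invocation of this macro expands to a read-only show function that
 * prints one hpb->stats counter, together with its DEVICE_ATTR_RO definition.
 */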
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) #define ufshpb_sysfs_attr_show_func(__name)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) static ssize_t __name##_show(struct device *dev,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	struct device_attribute *attr, char *buf)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	struct scsi_device *sdev = to_scsi_device(dev);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	if (!hpb)							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		return -ENODEV;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static DEVICE_ATTR_RO(__name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) ufshpb_sysfs_attr_show_func(hit_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ufshpb_sysfs_attr_show_func(miss_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) ufshpb_sysfs_attr_show_func(rb_noti_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ufshpb_sysfs_attr_show_func(rb_active_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) ufshpb_sysfs_attr_show_func(map_req_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ufshpb_sysfs_attr_show_func(umap_req_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) static struct attribute *hpb_dev_stat_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	&dev_attr_hit_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	&dev_attr_miss_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	&dev_attr_rb_noti_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	&dev_attr_rb_active_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	&dev_attr_rb_inactive_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	&dev_attr_map_req_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	&dev_attr_umap_req_cnt.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) struct attribute_group ufs_sysfs_hpb_stat_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	.name = "hpb_stats",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	.attrs = hpb_dev_stat_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) };
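/*
 * The group is exposed under the scsi_device sysfs node; an illustrative
 * path (the H:C:T:L address varies per system) would be:
 *
 *   /sys/class/scsi_device/0:0:0:0/device/hpb_stats/hit_cnt
 */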
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /* SYSFS functions: HPB parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) #define ufshpb_sysfs_param_show_func(__name)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static ssize_t __name##_show(struct device *dev,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	struct device_attribute *attr, char *buf)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	struct scsi_device *sdev = to_scsi_device(dev);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	if (!hpb)							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		return -ENODEV;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
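/*
 * Unlike the stat macro above, this one does not declare the device
 * attribute itself: each parameter pairs its show function with a store
 * handler and a DEVICE_ATTR_RW definition below.
 */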
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) ufshpb_sysfs_param_show_func(requeue_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			 const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (val < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	hpb->params.requeue_timeout_ms = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static DEVICE_ATTR_RW(requeue_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) ufshpb_sysfs_param_show_func(activation_thld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) activation_thld_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		      const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (val <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	hpb->params.activation_thld = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) static DEVICE_ATTR_RW(activation_thld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) ufshpb_sysfs_param_show_func(normalization_factor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) normalization_factor_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			   const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	hpb->params.normalization_factor = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) static DEVICE_ATTR_RW(normalization_factor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) ufshpb_sysfs_param_show_func(eviction_thld_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	if (val <= hpb->params.eviction_thld_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	hpb->params.eviction_thld_enter = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) static DEVICE_ATTR_RW(eviction_thld_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) ufshpb_sysfs_param_show_func(eviction_thld_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 			 const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	if (val <= hpb->params.activation_thld)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	hpb->params.eviction_thld_exit = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static DEVICE_ATTR_RW(eviction_thld_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ufshpb_sysfs_param_show_func(read_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		      const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	/* read_timeout must be at least twice the polling interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (val < hpb->params.timeout_polling_interval_ms * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	hpb->params.read_timeout_ms = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static DEVICE_ATTR_RW(read_timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ufshpb_sysfs_param_show_func(read_timeout_expiries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	if (val <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	hpb->params.read_timeout_expiries = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static DEVICE_ATTR_RW(read_timeout_expiries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) timeout_polling_interval_ms_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 				  struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 				  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	/* the polling interval must be at most half of read_timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	hpb->params.timeout_polling_interval_ms = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static DEVICE_ATTR_RW(timeout_polling_interval_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ufshpb_sysfs_param_show_func(inflight_map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static ssize_t inflight_map_req_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 				      struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 				      const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	if (kstrtoint(buf, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	hpb->params.inflight_map_req = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static DEVICE_ATTR_RW(inflight_map_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
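/*
 * Host-control-mode defaults.  The sysfs store handlers above enforce the
 * matching invariants: eviction_thld_enter > eviction_thld_exit >
 * activation_thld, and read_timeout_ms at least twice the polling interval.
 */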
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	hpb->params.normalization_factor = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	hpb->params.read_timeout_ms = READ_TO_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static struct attribute *hpb_dev_param_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	&dev_attr_requeue_timeout_ms.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	&dev_attr_activation_thld.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	&dev_attr_normalization_factor.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	&dev_attr_eviction_thld_enter.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	&dev_attr_eviction_thld_exit.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	&dev_attr_read_timeout_ms.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	&dev_attr_read_timeout_expiries.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	&dev_attr_timeout_polling_interval_ms.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	&dev_attr_inflight_map_req.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) struct attribute_group ufs_sysfs_hpb_param_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	.name = "hpb_params",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	.attrs = hpb_dev_param_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
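/*
 * Pre-allocate the pre_req pool: one bio and one zeroed page per entry, for
 * half of the LU queue depth.  Any allocation failure unwinds the entries
 * that were already added to the free list.
 */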
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	struct ufshpb_req *pre_req = NULL, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	hpb->throttle_pre_req = qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	hpb->num_inflight_pre_req = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	if (!hpb->pre_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		goto release_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	for (i = 0; i < qd; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		pre_req = hpb->pre_req + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		INIT_LIST_HEAD(&pre_req->list_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		pre_req->req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		pre_req->bio = bio_alloc(GFP_KERNEL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		if (!pre_req->bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			goto release_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		if (!pre_req->wb.m_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 			bio_put(pre_req->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			goto release_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) release_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		list_del_init(&pre_req->list_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		bio_put(pre_req->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		__free_page(pre_req->wb.m_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	kfree(hpb->pre_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	struct ufshpb_req *pre_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	for (i = 0; i < hpb->throttle_pre_req; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		pre_req = hpb->pre_req + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		bio_put(hpb->pre_req[i].bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		if (pre_req->wb.m_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 			__free_page(hpb->pre_req[i].wb.m_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		list_del_init(&pre_req->list_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	kfree(hpb->pre_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) static void ufshpb_stat_init(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	hpb->stats.hit_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	hpb->stats.miss_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	hpb->stats.rb_noti_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	hpb->stats.rb_active_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	hpb->stats.rb_inactive_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	hpb->stats.map_req_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	hpb->stats.umap_req_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static void ufshpb_param_init(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	if (hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		ufshpb_hcm_param_init(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
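/*
 * Per-LU initialization: set up locks, lists and work items, create the
 * map-request and map-page kmem caches, preallocate the pre_req pool
 * and the region table, then seed the stats and parameters. In host
 * control mode the read-timeout poller is kicked off right away. Error
 * paths unwind in reverse order of setup.
 */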
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	spin_lock_init(&hpb->rgn_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	spin_lock_init(&hpb->rsp_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	spin_lock_init(&hpb->param_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	INIT_LIST_HEAD(&hpb->lh_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	INIT_LIST_HEAD(&hpb->list_hpb_lu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		INIT_WORK(&hpb->ufshpb_normalization_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			  ufshpb_normalization_work_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 				  ufshpb_read_to_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			  sizeof(struct ufshpb_req), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	if (!hpb->map_req_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache creation failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 			hpb->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 			  sizeof(struct page *) * hpb->pages_per_srgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 			  0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	if (!hpb->m_page_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache creation failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 			hpb->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		goto release_req_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	ret = ufshpb_pre_req_mempool_init(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			hpb->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		goto release_m_page_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	ret = ufshpb_alloc_region_tbl(hba, hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		goto release_pre_req_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	ufshpb_stat_init(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	ufshpb_param_init(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		unsigned int poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		poll = hpb->params.timeout_polling_interval_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		schedule_delayed_work(&hpb->ufshpb_read_to_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 				      msecs_to_jiffies(poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) release_pre_req_mempool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	ufshpb_pre_req_mempool_destroy(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) release_m_page_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	kmem_cache_destroy(hpb->m_page_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) release_req_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	kmem_cache_destroy(hpb->map_req_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
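/*
 * Allocate and initialize the ufshpb_lu instance for @sdev and hook it
 * up via sdev->hostdata. Returns NULL on failure.
 */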
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static struct ufshpb_lu *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		    struct ufshpb_dev_info *hpb_dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		    struct ufshpb_lu_info *hpb_lu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	hpb->lun = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	hpb->sdev_ufs_lu = sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	ret = ufshpb_lu_hpb_init(hba, hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		goto release_hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	sdev->hostdata = hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	return hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) release_hpb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	kfree(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	struct ufshpb_region *rgn, *next_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	struct ufshpb_subregion *srgn, *next_srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	 * If a device reset occurred, any remaining HPB region information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	 * may be stale. Discarding the HPB response lists left over after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 * the reset prevents unnecessary work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 				 list_inact_rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		list_del_init(&rgn->list_inact_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 				 list_act_srgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		list_del_init(&srgn->list_act_srgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
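/*
 * Synchronously cancel all per-LU work: in host control mode the
 * read-timeout and normalization work as well, and always map_work.
 */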
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		cancel_work_sync(&hpb->ufshpb_normalization_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	cancel_work_sync(&hpb->map_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
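/*
 * Poll the fHpbReset device flag until the device clears it, retrying
 * up to HPB_RESET_REQ_RETRIES times with a ~1ms sleep in between.
 * Returns false once the device has cleared the flag, true otherwise
 * (including on query failure).
 */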
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	bool flag_res = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	int try;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	/* wait for the device to complete HPB reset query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		dev_dbg(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 			"%s: polling fHpbReset flag, attempt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 			__func__, try);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		/* Poll fHpbReset flag to be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			dev_err(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 				"%s reading fHpbReset flag failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 				__func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			return flag_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		if (!flag_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		usleep_range(1000, 1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	if (flag_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		dev_err(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			"%s fHpbReset was not cleared by the device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	return flag_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
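/*
 * Called once a host reset has completed: LUs parked in HPB_RESET by
 * ufshpb_reset_host() are moved back to HPB_PRESENT.
 */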
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) void ufshpb_reset(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		if (ufshpb_get_state(hpb) != HPB_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		ufshpb_set_state(hpb, HPB_PRESENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
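/*
 * Prepare all HPB LUs for a host reset: move them from HPB_PRESENT to
 * HPB_RESET, cancel outstanding work and drop the now-stale response
 * lists.
 */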
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) void ufshpb_reset_host(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 		ufshpb_set_state(hpb, HPB_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		ufshpb_cancel_jobs(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		ufshpb_discard_rsp_lists(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
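/* Move every HPB_PRESENT LU to HPB_SUSPEND and quiesce its work items. */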
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) void ufshpb_suspend(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		ufshpb_set_state(hpb, HPB_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		ufshpb_cancel_jobs(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
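/*
 * Bring suspended (or still present) LUs back to HPB_PRESENT, restart
 * map work, and re-arm the read-timeout poller in host control mode.
 */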
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) void ufshpb_resume(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		ufshpb_set_state(hpb, HPB_PRESENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		ufshpb_kick_map_work(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		if (hpb->is_hcm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 			unsigned int poll =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 				hpb->params.timeout_polling_interval_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 			schedule_delayed_work(&hpb->ufshpb_read_to_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 				msecs_to_jiffies(poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
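/*
 * Read the unit descriptor for @lun and extract the HPB-related fields
 * into @hpb_lu_info. Fails with -ENODEV if the LU is not HPB-enabled or
 * reports zero max active regions.
 */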
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 			      struct ufshpb_lu_info *hpb_lu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	u16 max_active_rgns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	u8 lu_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	char desc_buf[QUERY_DESC_MAX_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	pm_runtime_get_sync(hba->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 					    QUERY_DESC_IDN_UNIT, lun, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 					    desc_buf, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	pm_runtime_put_sync(hba->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		dev_err(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 			"%s: idn %d lun %d query request failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 			__func__, QUERY_DESC_IDN_UNIT, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	if (lu_enable != LU_ENABLED_HPB_FUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	max_active_rgns = get_unaligned_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	if (!max_active_rgns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		dev_err(hba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			"lun %d: invalid number of max active regions\n", lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	hpb_lu_info->num_blocks = get_unaligned_be64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	hpb_lu_info->pinned_start = get_unaligned_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	hpb_lu_info->num_pinned = get_unaligned_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	hpb_lu_info->max_active_rgns = max_active_rgns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 
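/*
 * Tear down the per-LU HPB state: mark it failed, detach it from the
 * scsi_device, cancel work, and free the pre_req pool, region table and
 * kmem caches.
 */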
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	ufshpb_set_state(hpb, HPB_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	sdev = hpb->sdev_ufs_lu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	ufshpb_cancel_jobs(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	ufshpb_pre_req_mempool_destroy(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	ufshpb_destroy_region_tbl(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	kmem_cache_destroy(hpb->map_req_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	kmem_cache_destroy(hpb->m_page_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	list_del_init(&hpb->list_hpb_lu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	kfree(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
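/*
 * Runs once every LU has been through ufshpb_init_hpb_lu(). If no HPB
 * LU was found, or the fHpbReset flag never cleared, HPB is torn down.
 * Otherwise the global mempools are trimmed to what the active
 * subregions actually need and each LU is switched to HPB_PRESENT,
 * kicking map work for pinned regions and, in device control mode,
 * issuing an unmap-all request.
 */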
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	int pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	bool init_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	if (tot_active_srgn_pages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		ufshpb_remove(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	init_success = !ufshpb_check_hpb_reset_query(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	if (pool_size > tot_active_srgn_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	shost_for_each_device(sdev, hba->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		hpb = ufshpb_get_hpb_data(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		if (init_success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			ufshpb_set_state(hpb, HPB_PRESENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 				queue_work(ufshpb_wq, &hpb->map_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			if (!hpb->is_hcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 				ufshpb_issue_umap_all_req(hpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 			ufshpb_destroy_lu(hba, sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	if (!init_success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		ufshpb_remove(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
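/*
 * Per-scsi_device HPB setup, called while each LU is being configured:
 * read the unit descriptor and, if the LU is HPB-enabled, allocate its
 * ufshpb_lu. The last LU to get here triggers ufshpb_hpb_lu_prepared().
 */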
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	struct ufshpb_lu *hpb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	struct ufshpb_lu_info hpb_lu_info = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	int lun = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	if (lun >= hba->dev_info.max_lu_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	hpb = ufshpb_alloc_hpb_lu(hba, sdev, ufs_hba_to_hpb(hba), &hpb_lu_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	if (!hpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 			hpb->srgns_per_rgn * hpb->pages_per_srgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	/* All LUs are initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	if (atomic_dec_and_test(&ufs_hba_to_hpb(hba)->slave_conf_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		ufshpb_hpb_lu_prepared(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
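/*
 * Set up the module-global resources: the map-context kmem cache, the
 * mctx and page mempools (sized from ufshpb_host_map_kbytes), and the
 * unbound, memory-reclaim-safe workqueue used for map work.
 */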
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static int ufshpb_init_mem_wq(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	unsigned int pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 					sizeof(struct ufshpb_map_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 					0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	if (!ufshpb_mctx_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 						    ufshpb_mctx_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	if (!ufshpb_mctx_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		goto release_mctx_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	if (!ufshpb_page_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		goto release_mctx_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	ufshpb_wq = alloc_workqueue("ufshpb-wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (!ufshpb_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		goto release_page_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) release_page_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	mempool_destroy(ufshpb_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) release_mctx_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	mempool_destroy(ufshpb_mctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) release_mctx_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	kmem_cache_destroy(ufshpb_mctx_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
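/*
 * Parse the HPB fields of the geometry descriptor. HPB is disabled on
 * the spot if the device reports no HPB LUs, a zero region/subregion
 * size, or zero device-wide max active regions.
 */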
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	struct ufshpb_dev_info *hpb_info = ufs_hba_to_hpb(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	int max_active_rgns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	int hpb_num_lu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	if (hpb_num_lu == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		dev_err(hba->dev, "No HPB LU supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		hpb_info->hpb_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	max_active_rgns = get_unaligned_be16(geo_buf +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	    max_active_rgns == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		dev_err(hba->dev, "Device does not support HPB\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		hpb_info->hpb_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
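/*
 * Parse the HPB fields of the device descriptor: control mode, HPB
 * version (only HPB_SUPPORT_VERSION and the legacy version are
 * accepted), the max-single-HPB-command attribute (falling back to
 * HPB_MULTI_CHUNK_LOW if the query fails), and the LU count used later
 * to detect the end of LU initialization.
 */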
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	int version, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	if ((version != HPB_SUPPORT_VERSION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		dev_err(hba->dev, "%s: HPB version %x is not supported.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 			__func__, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		hpb_dev_info->hpb_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	if (version == HPB_SUPPORT_LEGACY_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 		hpb_dev_info->is_legacy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	pm_runtime_get_sync(hba->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	pm_runtime_put_sync(hba->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		dev_err(hba->dev, "%s: reading max size of single HPB command failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	 * Get the number of user logical units to check whether all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	 * scsi_devices have finished initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
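/*
 * Device-level HPB bring-up: allocate the global caches, pools and
 * workqueue, arm slave_conf_cnt with the LU count, then issue the
 * fHpbReset set-flag query (retried up to HPB_RESET_REQ_RETRIES times)
 * so the device rebuilds its HPB state.
 */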
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) void ufshpb_init(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	int try;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	if (ufshpb_init_mem_wq(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		hpb_dev_info->hpb_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	tot_active_srgn_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	/* issue HPB reset query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 
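/* Free the module-global pools, cache and workqueue. */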
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) void ufshpb_remove(struct ufs_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	mempool_destroy(ufshpb_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	mempool_destroy(ufshpb_mctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	kmem_cache_destroy(ufshpb_mctx_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	destroy_workqueue(ufshpb_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) module_param(ufshpb_host_map_kbytes, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) MODULE_PARM_DESC(ufshpb_host_map_kbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	"ufshpb host mapping memory in kilobytes for the ufshpb memory pool");