Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

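/*
 * Validate a line's end metadata (emeta): check the CRC over the buffer and
 * the pblk magic identifier. Returns 0 if the emeta is usable, 1 otherwise.
 */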
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return 1;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return 1;

	return 0;
}

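/*
 * Rebuild the L2P table for a closed line from the LBA list stored in its
 * emeta. Bad blocks are skipped, empty entries are invalidated, and the number
 * of recovered LBAs is checked against the count recorded in the emeta.
 */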
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	u64 data_start, data_end;
	u64 nr_valid_lbas, nr_lbas = 0;
	u64 i;

	lba_list = emeta_to_lbas(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	data_end = line->emeta_ssec;
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < data_end; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}

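/*
 * Account for already written sectors by advancing the line's write pointer
 * and decreasing the count of remaining mappable sectors (left_msecs).
 */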
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
				u64 written_secs)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	for (i = 0; i < written_secs; i += pblk->min_write_pgs)
		__pblk_alloc_page(pblk, line, pblk->min_write_pgs);

	spin_lock(&l_mg->free_lock);
	if (written_secs > line->left_msecs) {
		/*
		 * We have all data sectors written
		 * and some emeta sectors written too.
		 */
		line->left_msecs = 0;
	} else {
		/* We have only some data sectors written. */
		line->left_msecs -= written_secs;
	}
	spin_unlock(&l_mg->free_lock);
}

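/*
 * Sum the write pointers of all usable chunks in an open line to obtain the
 * number of sectors already written, and update the line's write pointer
 * accordingly (excluding the smeta sectors).
 */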
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	u64 written_secs = 0;
	int valid_chunks = 0;
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct nvm_chk_meta *chunk = &line->chks[i];

		if (chunk->state & NVM_CHK_ST_OFFLINE)
			continue;

		written_secs += chunk->wp;
		valid_chunks++;
	}

	if (lm->blk_per_line - nr_bb != valid_chunks)
		pblk_err(pblk, "recovery line %d is bad\n", line->id);

	pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

	return written_secs;
}

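/* Preallocated resources shared by the synchronous recovery reads on a line */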
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};

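/* kref release callback: the last outstanding padding request has completed */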
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}

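/* End-io handler for a padding write: release the chunk and drop the kref */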
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;

	pblk_up_chunk(pblk, ppa_list[0]);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}

/* Pad line using line bitmap. */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
			       int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	void *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	void *data;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (rq_ppas < pblk->min_write_pgs) {
		pblk_err(pblk, "corrupted pad line %d\n", line->id);
		goto fail_complete;
	}

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	ret = pblk_alloc_rqd_meta(pblk, rqd);
	if (ret) {
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		goto fail_complete;
	}

	rqd->bio = NULL;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->is_seq = 1;
	rqd->nr_ppas = rq_ppas;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	meta_list = rqd->meta_list;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			struct pblk_sec_meta *meta;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = addr_empty;
			meta = pblk_get_meta(pblk, meta_list, i);
			meta->lba = addr_empty;
			ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		pblk_up_chunk(pblk, ppa_list[0]);
		kref_put(&pad_rq->ref, pblk_recov_complete);
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		goto fail_complete;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

fail_complete:
	kref_put(&pad_rq->ref, pblk_recov_complete);
	wait_for_completion(&pad_rq->wait);

	if (!pblk_line_is_full(line))
		pblk_err(pblk, "corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;
}

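/*
 * Number of sectors to pad after a read error so that previously written data
 * clears the device's write buffer (mw_cunits) on every LUN, capped by the
 * sectors left in the line.
 */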
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

	return (distance > line->left_msecs) ? line->left_msecs : distance;
}

/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
						  struct pblk_line *line,
						  int index)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	struct ppa_addr ppa;
	int pos;

	rlun = &pblk->luns[index];
	ppa = rlun->bppa;
	pos = pblk_ppa_to_pos(geo, ppa);

	return &line->chks[pos];
}

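/*
 * Returns 1 if any good chunk's write pointer lies outside a max_write_pgs
 * window below the first good chunk's write pointer, i.e. the stripe write
 * across the line was interrupted unevenly.
 */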
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
				      struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = lm->blk_per_line;
	struct nvm_chk_meta *chunk;
	u64 max_wp, min_wp;
	int i;

	i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

	/* If there is one or zero good chunks in the line,
	 * the write pointers can't be unbalanced.
	 */
	if (i >= (blk_in_line - 1))
		return 0;

	chunk = pblk_get_stripe_chunk(pblk, line, i);
	max_wp = chunk->wp;
	if (max_wp > pblk->max_write_pgs)
		min_wp = max_wp - pblk->max_write_pgs;
	else
		min_wp = 0;

	i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	while (i < blk_in_line) {
		chunk = pblk_get_stripe_chunk(pblk, line, i);
		if (chunk->wp > max_wp || chunk->wp < min_wp)
			return 1;

		i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	}

	return 0;
}

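/*
 * Read back the written part of an open line and recover the lba of each
 * sector from its out-of-band metadata. On an uncorrectable read error the
 * line is padded once and the request is retried.
 */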
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list;
	u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	bool padded = false;
	int rq_ppas;
	int i, j;
	int ret;
	u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

	if (pblk_line_wps_are_unbalanced(pblk, line))
		pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;

retry_rq:
	rqd->bio = NULL;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	ppa_list = nvm_rq_to_ppa_list(rqd);

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->is_seq = 1;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++)
			ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr + j, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* If a read fails, do a best effort by padding the line and retrying */
	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
		int pad_distance, ret;

		if (padded) {
			pblk_log_read_err(pblk, rqd);
			return -EINTR;
		}

		pad_distance = pblk_pad_distance(pblk, line);
		ret = pblk_recov_pad_line(pblk, line, pad_distance);
		if (ret)
			return ret;

		padded = true;
		goto retry_rq;
	}

	pblk_get_packed_meta(pblk, rqd);

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		lba_list[paddr++] = cpu_to_le64(lba);

		if (lba == ADDR_EMPTY || lba >= pblk->capacity)
			continue;

		line->nr_valid_lbas++;
		pblk_update_map(pblk, lba, ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(padded && !pblk_line_is_full(line));
#endif

	return 0;
}

/* Scan line for lbas on the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int ret = 0;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p);
	if (ret) {
		pblk_err(pblk, "could not recover L2P from OOB\n");
		goto out;
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	mempool_free(rqd, &pblk->r_rq_pool);
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

	return ret;
}

/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}

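/*
 * Work out the first sector used by emeta by walking backwards from the end
 * of the line, skipping sectors that fall on bad blocks.
 */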
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int emeta_secs;
	u64 emeta_start;
	struct ppa_addr ppa;
	int pos;

	emeta_secs = lm->emeta_sec[0];
	emeta_start = lm->sec_per_line;

	while (emeta_secs) {
		emeta_start--;
		ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);
		if (!test_bit(pos, line->blk_bitmap))
			emeta_secs--;
	}

	return emeta_start;
}

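/*
 * Reject lines written with an incompatible emeta major version; newer minor
 * versions are tolerated.
 */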
static int pblk_recov_check_line_version(struct pblk *pblk,
					 struct line_emeta *emeta)
{
	struct line_header *header = &emeta->header;

	if (header->version_major != EMETA_VERSION_MAJOR) {
		pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
			 header->version_major, EMETA_VERSION_MAJOR);
		return 1;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (header->version_minor > EMETA_VERSION_MINOR)
		pblk_info(pblk, "newer line minor version found: %d\n",
				header->version_minor);
#endif

	return 0;
}

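/*
 * Restore the write amplification counters (user, pad, gc) and their reset
 * baselines from the counters persisted in the line's emeta.
 */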
static void pblk_recov_wa_counters(struct pblk *pblk,
				   struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct line_header *header = &emeta->header;
	struct wa_counters *wa = emeta_to_wa(lm, emeta);

	/* WA counters were introduced in emeta version 0.2 */
	if (header->version_major > 0 || header->version_minor >= 2) {
		u64 user = le64_to_cpu(wa->user);
		u64 pad = le64_to_cpu(wa->pad);
		u64 gc = le64_to_cpu(wa->gc);

		atomic64_set(&pblk->user_wa, user);
		atomic64_set(&pblk->pad_wa, pad);
		atomic64_set(&pblk->gc_wa, gc);

		pblk->user_rst_wa = user;
		pblk->pad_rst_wa = pad;
		pblk->gc_rst_wa = gc;
	}
}

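/*
 * A line is considered written if it is not bad and the chunk holding its
 * smeta is either closed or open with a write pointer past the smeta sectors.
 */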
static int pblk_line_was_written(struct pblk_line *line,
				 struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct ppa_addr bppa;
	int smeta_blk;

	if (line->state == PBLK_LINESTATE_BAD)
		return 0;

	smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (smeta_blk >= lm->blk_per_line)
		return 0;

	bppa = pblk->luns[smeta_blk].bppa;
	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

	if (chunk->state & NVM_CHK_ST_CLOSED ||
	    (chunk->state & NVM_CHK_ST_OPEN
	     && chunk->wp >= lm->smeta_sec))
		return 1;

	return 0;
}

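/* A line is open if any of its chunks is still in the open state */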
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	for (i = 0; i < lm->blk_per_line; i++)
		if (line->chks[i].state & NVM_CHK_ST_OPEN)
			return true;

	return false;
}

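/*
 * Main recovery entry point: scan all lines, order the written ones by
 * sequence number, and rebuild the L2P table from emeta where possible,
 * falling back to per-sector OOB metadata otherwise. Returns the open data
 * line to resume writing on, NULL if none, or an ERR_PTR on fatal error.
 */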
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		if (!pblk_line_was_written(line, pblk))
			continue;

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_smeta_read(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
			pblk_err(pblk, "found incompatible line version %u\n",
					smeta_buf->header.version_major);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			guid_copy(&pblk->instance_uuid,
				  (guid_t *)&smeta_buf->header.uuid);
			valid_uuid = 1;
		}

		if (!guid_equal(&pblk->instance_uuid,
				(guid_t *)&smeta_buf->header.uuid)) {
			pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		guid_gen(&pblk->instance_uuid);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_is_open(pblk, line)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_line_version(pblk, line->emeta->buf))
			return ERR_PTR(-EINVAL);

		pblk_recov_wa_counters(pblk, line->emeta->buf);

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, l_mg->bitmap_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_OPEN;
			spin_unlock(&line->lock);

			line->emeta->mem = 0;
			atomic_set(&line->emeta->sync, 0);

			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

			data_line = line;
			line->meta_line = meta_line;

			open_lines++;
		}
	}

	if (!open_lines) {
		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);
	} else {
		spin_lock(&l_mg->free_lock);
		l_mg->data_line = data_line;
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
		spin_unlock(&l_mg->free_lock);
	}

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pblk_err(pblk, "failed to recover all found lines %d/%d\n",
						found_lines, recovered_lines);

	return data_line;
}

/*
 * Pad current line
 */
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_line(pblk, line, left_msecs);
	if (ret) {
		pblk_err(pblk, "tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}