Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2016 CNEX Labs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *                  Matias Bjorling <matias@cnexlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * modify it under the terms of the GNU General Public License version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * 2 as published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * pblk-gc.c - pblk's garbage collector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include "pblk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "pblk-trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	if (gc_rq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 		vfree(gc_rq->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	kfree(gc_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
/*
 * Drain the GC write list: hand every queued GC request to the write
 * cache, drop the per-request reference it holds on its source line,
 * and free the request.
 *
 * Returns 1 when there was nothing queued (the GC writer kthread uses
 * this to decide whether to sleep), 0 otherwise.
 */
static int pblk_gc_write(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_gc_rq *gc_rq, *tgc_rq;
	LIST_HEAD(w_list);

	spin_lock(&gc->w_lock);
	if (list_empty(&gc->w_list)) {
		spin_unlock(&gc->w_lock);
		return 1;
	}

	/* Detach the whole pending list under the lock, process it below
	 * without holding w_lock.
	 */
	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
	gc->w_entries = 0;
	spin_unlock(&gc->w_lock);

	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
		pblk_write_gc_to_cache(pblk, gc_rq);
		list_del(&gc_rq->list);
		/* reference taken when the request was queued for writing */
		kref_put(&gc_rq->line->ref, pblk_line_put);
		pblk_gc_free_gc_rq(gc_rq);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
/* Wake the GC writer kthread so it drains the pending GC write list */
static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_writer_ts);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
/*
 * Return a line that had been selected for GC back to the appropriate
 * closed-state GC list (used when GC on the line fails or is aborted).
 *
 * Lock ordering: l_mg->gc_lock is taken before line->lock, and the
 * list insertion happens with gc_lock still held so the line cannot be
 * picked as a victim while in transition.
 */
void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_CLOSED;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

	/* We need to reset gc_group in order to ensure that
	 * pblk_line_gc_list will return proper move_list
	 * since right now current line is not on any of the
	 * gc lists.
	 */
	line->gc_group = PBLK_LINEGC_NONE;
	move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);
	list_add_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
/*
 * Work item: read the valid sectors described by a GC request from the
 * victim line, then queue the request on the GC write list so
 * pblk_gc_write() moves the data into the write cache.
 */
static void pblk_gc_line_ws(struct work_struct *work)
{
	struct pblk_line_ws *gc_rq_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = gc_rq_ws->pblk;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line = gc_rq_ws->line;
	struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
	int ret;

	/* Release the slot taken with down_timeout() when this request
	 * was prepared in pblk_gc_line_prepare_ws().
	 */
	up(&gc->gc_sem);

	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		/* flag the read failure so later stages know GC on this
		 * line did not complete cleanly
		 */
		line->w_err_gc->has_gc_err = 1;
		goto out;
	}

	if (!gc_rq->secs_to_gc)
		goto out;

retry:
	spin_lock(&gc->w_lock);
	if (gc->w_entries >= PBLK_GC_RQ_QD) {
		/* write queue full: kick the writer and back off briefly */
		spin_unlock(&gc->w_lock);
		pblk_gc_writer_kick(&pblk->gc);
		usleep_range(128, 256);
		goto retry;
	}
	gc->w_entries++;
	list_add_tail(&gc_rq->list, &gc->w_list);
	spin_unlock(&gc->w_lock);

	pblk_gc_writer_kick(&pblk->gc);

	/* On success, gc_rq and the line reference are released by
	 * pblk_gc_write(); only the wrapper is freed here.
	 */
	kfree(gc_rq_ws);
	return;

out:
	pblk_gc_free_gc_rq(gc_rq);
	kref_put(&line->ref, pblk_line_put);
	kfree(gc_rq_ws);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
/*
 * Read the line's emeta from the device and return a kvmalloc'ed copy
 * of its LBA list; the caller owns the returned buffer and must
 * kvfree() it.  Returns NULL on read failure, inconsistent emeta, or
 * allocation failure.
 */
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
				       struct pblk_line *line)
{
	struct line_emeta *emeta_buf;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	__le64 *lba_list;
	int ret;

	emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
	if (!emeta_buf)
		return NULL;

	ret = pblk_line_emeta_read(pblk, line, emeta_buf);
	if (ret) {
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		kvfree(emeta_buf);
		return NULL;
	}

	/* If this check fails, it means that emeta is corrupted.
	 * For now, leave the line untouched.
	 * TODO: Implement a recovery routine that scans and moves
	 * all sectors on the line.
	 */

	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		kvfree(emeta_buf);
		return NULL;
	}

	/* keep only the LBA list; the rest of the emeta is not needed */
	lba_list = kvmalloc(lba_list_size, GFP_KERNEL);

	if (lba_list)
		memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

	kvfree(emeta_buf);

	return lba_list;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
/*
 * Work item: prepare GC for one victim line.  Snapshot the line's
 * invalid-sector bitmap and LBA list, then carve the remaining valid
 * sectors into pblk_gc_rq chunks (at most pblk->max_write_pgs sectors
 * each) and queue each chunk on the GC line-reader workqueue
 * (pblk_gc_line_ws).
 *
 * On any failure the line is put back on its closed-state GC list via
 * pblk_put_line_back().
 */
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *gc_rq_ws;
	struct pblk_gc_rq *gc_rq;
	__le64 *lba_list;
	unsigned long *invalid_bitmap;
	int sec_left, nr_secs, bit;

	invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!invalid_bitmap)
		goto fail_free_ws;

	if (line->w_err_gc->has_write_err) {
		/* write-error lines carry their own LBA list; take
		 * ownership of it (freed via kvfree at "out")
		 */
		lba_list = line->w_err_gc->lba_list;
		line->w_err_gc->lba_list = NULL;
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
	}

	/* snapshot bitmap and valid-sector count under the line lock */
	spin_lock(&line->lock);
	bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
	sec_left = pblk_line_vsc(line);
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

	bit = -1;
next_rq:
	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
	if (!gc_rq)
		goto fail_free_lba_list;

	/* collect up to max_write_pgs still-valid sectors for this chunk */
	nr_secs = 0;
	do {
		bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
								bit + 1);
		/* stop once we reach the emeta region of the line */
		if (bit > line->emeta_ssec)
			break;

		gc_rq->paddr_list[nr_secs] = bit;
		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
	} while (nr_secs < pblk->max_write_pgs);

	if (unlikely(!nr_secs)) {
		kfree(gc_rq);
		goto out;
	}

	gc_rq->nr_secs = nr_secs;
	gc_rq->line = line;

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data)
		goto fail_free_gc_rq;

	gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!gc_rq_ws)
		goto fail_free_gc_data;

	gc_rq_ws->pblk = pblk;
	gc_rq_ws->line = line;
	gc_rq_ws->priv = gc_rq;

	/* The write GC path can be much slower than the read GC one due to
	 * the budget imposed by the rate-limiter. Balance in case that we get
	 * back pressure from the write GC path.
	 */
	while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
		io_schedule();

	/* one line reference per in-flight chunk; released once the
	 * chunk has been processed (read failure or write completion)
	 */
	kref_get(&line->ref);

	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

	sec_left -= nr_secs;
	if (sec_left > 0)
		goto next_rq;

out:
	kvfree(lba_list);
	kfree(line_ws);
	kfree(invalid_bitmap);

	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	return;

fail_free_gc_data:
	vfree(gc_rq->data);
fail_free_gc_rq:
	kfree(gc_rq);
fail_free_lba_list:
	kvfree(lba_list);
fail_free_invalid_bitmap:
	kfree(invalid_bitmap);
fail_free_ws:
	kfree(line_ws);

	/* Line goes back to closed state, so we cannot release additional
	 * reference for line, since we do that only when we want to do
	 * gc to free line state transition.
	 */
	pblk_put_line_back(pblk, line);
	atomic_dec(&gc->read_inflight_gc);

	pblk_err(pblk, "failed to GC line %d\n", line->id);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	struct pblk_line_ws *line_ws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	if (!line_ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	line_ws->pblk = pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	line_ws->line = line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	atomic_inc(&gc->pipeline_gc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	queue_work(gc->gc_reader_wq, &line_ws->ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
/* Wake the GC reader kthread so it picks up queued victim lines */
static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
/*
 * Nudge the GC machinery: wake the writer and reader kthreads, and
 * (unless GC is being shut down) the main GC kthread, re-arming the
 * periodic GC timer.
 */
static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
/*
 * Take the next victim line off the GC read list and start GC on it.
 *
 * Returns 1 when the list was empty (caller may sleep), 0 otherwise.
 * If pblk_gc_line() fails, the line is put back on the read list so it
 * is retried on a later pass.
 */
static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 						 struct list_head *group_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	struct pblk_line *line, *victim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	victim = list_first_entry(group_list, struct pblk_line, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	list_for_each_entry(line, group_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		if (!atomic_read(&line->sec_to_update))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 			line_vsc = le32_to_cpu(*line->vsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		if (line_vsc < victim_vsc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 			victim = line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 			victim_vsc = le32_to_cpu(*victim->vsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	if (victim_vsc == ~0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	return victim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	unsigned int nr_blocks_free, nr_blocks_need;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	unsigned int werr_lines = atomic_read(&rl->werr_lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	nr_blocks_need = pblk_rl_high_thrs(rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	nr_blocks_free = pblk_rl_nr_free_blks(rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	/* This is not critical, no need to take lock here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	return ((werr_lines > 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
/*
 * Lines on the gc_full list have no valid sectors left, so no data has
 * to be copied: transition each one to GC state and drop its list
 * reference, which sends the line down the put path for reclamation.
 */
void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
							struct pblk_line, list);

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		/* remove from the GC list while still holding gc_lock */
		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		/* dropping the list's reference frees the line once the
		 * last holder lets go
		 */
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  * Lines with no valid sectors will be returned to the free list immediately. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  * GC is activated - either because the free block count is under the determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  * threshold, or because it is being forced from user space - only lines with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  * high count of invalid sectors will be recycled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static void pblk_gc_run(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	struct pblk_line *line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	struct list_head *group_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	bool run_gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	int read_inflight_gc, gc_group = 0, prev_group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	pblk_gc_free_full_lines(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) next_gc_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	group_list = l_mg->gc_lists[gc_group++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 		spin_lock(&l_mg->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		line = pblk_gc_get_victim_line(pblk, group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		if (!line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 			spin_unlock(&l_mg->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		spin_lock(&line->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		line->state = PBLK_LINESTATE_GC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 					line->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		spin_unlock(&line->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		list_del(&line->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		spin_unlock(&l_mg->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		spin_lock(&gc->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		list_add_tail(&line->list, &gc->r_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		spin_unlock(&gc->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 		pblk_gc_reader_kick(gc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		prev_group = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		/* No need to queue up more GC lines than we can handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	if (!prev_group && pblk->rl.rb_state > gc_group &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 						gc_group < PBLK_GC_NR_LISTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		goto next_gc_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) static void pblk_gc_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	pblk_gc_kick(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int pblk_gc_ts(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	struct pblk *pblk = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		pblk_gc_run(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 		set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static int pblk_gc_writer_ts(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	struct pblk *pblk = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		if (!pblk_gc_write(pblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static int pblk_gc_reader_ts(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	struct pblk *pblk = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		if (!pblk_gc_read(pblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) #ifdef CONFIG_NVM_PBLK_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		atomic_read(&gc->pipeline_gc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		if (!atomic_read(&gc->pipeline_gc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static void pblk_gc_start(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	pblk->gc.gc_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	pblk_debug(pblk, "gc start\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) void pblk_gc_should_start(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	if (gc->gc_enabled && !gc->gc_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		pblk_gc_start(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		pblk_gc_kick(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) void pblk_gc_should_stop(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	if (gc->gc_active && !gc->gc_forced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		gc->gc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) void pblk_gc_should_kick(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	pblk_rl_update_rates(&pblk->rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 			      int *gc_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	spin_lock(&gc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	*gc_enabled = gc->gc_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	*gc_active = gc->gc_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	spin_unlock(&gc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) int pblk_gc_sysfs_force(struct pblk *pblk, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	if (force < 0 || force > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	spin_lock(&gc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	gc->gc_forced = force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	if (force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 		gc->gc_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		gc->gc_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	spin_unlock(&gc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	pblk_gc_should_start(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) int pblk_gc_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	if (IS_ERR(gc->gc_ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		pblk_err(pblk, "could not allocate GC main kthread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		return PTR_ERR(gc->gc_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 							"pblk-gc-writer-ts");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	if (IS_ERR(gc->gc_writer_ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		pblk_err(pblk, "could not allocate GC writer kthread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 		ret = PTR_ERR(gc->gc_writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		goto fail_free_main_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 							"pblk-gc-reader-ts");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	if (IS_ERR(gc->gc_reader_ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		pblk_err(pblk, "could not allocate GC reader kthread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		ret = PTR_ERR(gc->gc_reader_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		goto fail_free_writer_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	gc->gc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	gc->gc_forced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	gc->gc_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	gc->w_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	atomic_set(&gc->read_inflight_gc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	atomic_set(&gc->pipeline_gc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	/* Workqueue that reads valid sectors from a line and submit them to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	 * GC writer to be recycled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	if (!gc->gc_line_reader_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 		goto fail_free_reader_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	/* Workqueue that prepare lines for GC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	if (!gc->gc_reader_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		pblk_err(pblk, "could not allocate GC reader workqueue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		goto fail_free_reader_line_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	spin_lock_init(&gc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	spin_lock_init(&gc->w_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	spin_lock_init(&gc->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	INIT_LIST_HEAD(&gc->w_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	INIT_LIST_HEAD(&gc->r_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) fail_free_reader_line_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	destroy_workqueue(gc->gc_line_reader_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) fail_free_reader_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	kthread_stop(gc->gc_reader_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) fail_free_writer_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	kthread_stop(gc->gc_writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) fail_free_main_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	kthread_stop(gc->gc_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) void pblk_gc_exit(struct pblk *pblk, bool graceful)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	struct pblk_gc *gc = &pblk->gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	gc->gc_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	del_timer_sync(&gc->gc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	gc->gc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	if (gc->gc_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 		kthread_stop(gc->gc_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	if (gc->gc_reader_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		kthread_stop(gc->gc_reader_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	if (graceful) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 		flush_workqueue(gc->gc_reader_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 		flush_workqueue(gc->gc_line_reader_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	destroy_workqueue(gc->gc_reader_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	destroy_workqueue(gc->gc_line_reader_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	if (gc->gc_writer_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 		kthread_stop(gc->gc_writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }