// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from the cache has not been
 * updated and now resides at another location in the cache. We do guarantee,
 * however, that if the value is read from the cache, it belongs to the mapped
 * lba. To guarantee ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}

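/*
 * Resolve the L2P mapping for a multi-sector read starting at blba. When the
 * run of sectors maps to the write buffer, the data is copied directly into
 * the bio; otherwise the ppa list is left for a device read. Returns the
 * number of sectors handled, which may be less than rqd->nr_ppas if the bio
 * needs to be split by the caller.
 */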
static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				struct bio *bio, sector_t blba,
				bool *from_cache)
{
	void *meta_list = rqd->meta_list;
	int nr_secs, i;

retry:
	nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
					from_cache);

	if (!*from_cache)
		goto end;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

		if (pblk_ppa_empty(rqd->ppa_list[i])) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta->lba = addr_empty;
		} else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
			/*
			 * Try to read from write buffer. The address is later
			 * checked on the write buffer to prevent retrieving
			 * overwritten data.
			 */
			if (!pblk_read_from_cache(pblk, bio, lba,
						  rqd->ppa_list[i])) {
				if (i == 0) {
					/*
					 * bio_advance() has not been called
					 * yet, so we can simply retry.
					 */
					goto retry;
				} else {
					/*
					 * bio_advance() has already been
					 * called, so we cannot retry. Return
					 * from this function and let the
					 * caller split the bio at the
					 * current sector position.
					 */
					nr_secs = i;
					goto end;
				}
			}
			meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		}
		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

end:
	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif

	return nr_secs;
}

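/*
 * Sanity check for a sequential read: verify that the lba stored in the OOB
 * metadata of each sector matches the lba that was requested.
 */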
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

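/*
 * Complete the original user bio. A high-ECC warning from the device is
 * still reported to the user as a successful read.
 */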
static void pblk_end_user_read(struct bio *bio, int error)
{
	if (error && error != NVM_RSP_WARN_HIGHECC)
		bio_io_error(bio);
	else
		bio_endio(bio);
}

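/*
 * Common read completion: account the I/O, check the OOB metadata against
 * the expected lbas, drop the line reference (if requested) and free the
 * request.
 */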
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	bio_end_io_acct(int_bio, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);
	bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

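/* Completion callback for reads submitted to the device */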
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio, rqd->error);
	__pblk_end_io_read(pblk, rqd, true);
}

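/*
 * Single-sector read: look up the lba and either copy the data from the
 * write buffer or set up rqd->ppa_addr for a device read.
 */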
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, bool *from_cache)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
			goto retry;
		}

		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

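/*
 * Entry point for user reads. Sectors cached in the write buffer are served
 * directly; anything else is submitted to the device, with the bio split as
 * needed so a single request never mixes cache and device data.
 */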
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	bool from_cache;
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	struct bio *int_bio, *split_bio;
	unsigned long start_time;

	start_time = bio_start_io_acct(bio);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = start_time;
	r_ctx->lba = blba;

	if (pblk_alloc_rqd_meta(pblk, rqd)) {
		bio_io_error(bio);
		pblk_free_rqd(pblk, rqd, PBLK_READ);
		return;
	}

	/* Clone read bio to deal internally with:
	 * - read errors when reading from the drive
	 * - bio_advance() calls during cache reads
	 */
	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

	if (nr_secs > 1)
		nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
						&from_cache);
	else
		pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);

split_retry:
	r_ctx->private = bio; /* original bio */
	rqd->bio = int_bio; /* internal bio */

	if (from_cache && nr_secs == rqd->nr_ppas) {
		/* All data was read from cache, we can complete the IO. */
		pblk_end_user_read(bio, 0);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
	} else if (nr_secs != rqd->nr_ppas) {
		/* The read bio request could be partially filled by the write
		 * buffer, but there are still holes that need to be read from
		 * the drive. To handle this, use the block layer mechanism to
		 * split this request into smaller ones and chain them.
		 */
		split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
				      &pblk_bio_set);
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);

		/* The new bio contains the first N sectors of the previous
		 * one, so we can continue to use the existing rqd, but we
		 * need to shrink the number of PPAs in it. The new bio is
		 * also guaranteed to contain only data from the cache or only
		 * data from the drive, never a mix of them.
		 */
		bio = split_bio;
		rqd->nr_ppas = nr_secs;
		if (rqd->nr_ppas == 1)
			rqd->ppa_addr = rqd->ppa_list[0];

		/* Recreate int_bio - the existing one might already have some
		 * needed internal fields modified.
		 */
		bio_put(int_bio);
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		goto split_retry;
	} else if (pblk_submit_io(pblk, rqd, NULL)) {
		/* Submitting IO to the drive failed, report an error */
		rqd->error = -ENODEV;
		pblk_end_io_read(rqd);
	}
}

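/*
 * Resolve the ppas for a multi-sector GC read. Sectors whose mapping has
 * changed since the line was selected for GC are dropped from the request.
 * Returns the number of sectors that are still valid.
 */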
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

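/*
 * Resolve the ppa for a single-sector GC read. Returns 1 if the sector is
 * still mapped to the GC line, 0 otherwise.
 */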
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->capacity) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

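/*
 * Synchronously read the valid sectors of a GC request. Sectors that have
 * been overwritten since GC selected them are silently skipped.
 */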
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_rq rqd;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;

	if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
		ret = -EIO;
		goto err_free_dma;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}