^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2016 CNEX Labs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Initial release: Javier Gonzalez <javier@cnexlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Matias Bjorling <matias@cnexlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * modify it under the terms of the GNU General Public License version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * 2 as published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Implementation of a physical block-device target for Open-channel SSDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * pblk-init.c - pblk's initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "pblk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "pblk-trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static unsigned int write_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) module_param(write_buffer_size, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/*
 * Slab caches shared by every pblk instance. They are kref-counted so
 * the first instance creates them and the last one tears them down.
 */
struct pblk_global_caches {
	struct kmem_cache *ws;		/* pblk_line_ws work contexts */
	struct kmem_cache *rec;		/* pblk_rec_ctx recovery contexts */
	struct kmem_cache *g_rq;	/* generic requests (read/erase pools) */
	struct kmem_cache *w_rq;	/* write requests */

	struct kref kref;		/* number of live pblk instances */

	struct mutex mutex;		/* Ensures consistency between
					 * caches and kref
					 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/* The single shared instance; kref starts at 0: "caches not created yet" */
static struct pblk_global_caches pblk_caches = {
	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
	.kref = KREF_INIT(0),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct bio_set pblk_bio_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static blk_qc_t pblk_submit_bio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct pblk *pblk = bio->bi_disk->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) if (bio_op(bio) == REQ_OP_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) pblk_discard(pblk, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) if (!(bio->bi_opf & REQ_PREFLUSH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) return BLK_QC_T_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * constraint. Writes can be of arbitrary size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (bio_data_dir(bio) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) blk_queue_split(&bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) pblk_submit_read(pblk, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /* Prevent deadlock in the case of a modest LUN configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * and large user I/Os. Unless stalled, the rate limiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * leaves at least 256KB available for user I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) blk_queue_split(&bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return BLK_QC_T_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
/* Block device operations: all I/O is funneled through pblk_submit_bio() */
static const struct block_device_operations pblk_bops = {
	.owner = THIS_MODULE,
	.submit_bio = pblk_submit_bio,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static size_t pblk_trans_map_size(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) int entry_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (pblk->addrf_len < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) entry_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return entry_size * pblk->capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
#ifdef CONFIG_NVM_PBLK_DEBUG
/* CRC32 over the whole in-memory L2P table (debug consistency check) */
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	return crc32_le(~(u32)0, pblk->trans_map,
			pblk_trans_map_size(pblk));
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
/* Release the in-memory L2P translation table */
static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct pblk_line *line = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (factory_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) guid_gen(&pblk->instance_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) line = pblk_recov_l2p(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (IS_ERR(line)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) pblk_err(pblk, "could not recover l2p table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #ifdef CONFIG_NVM_PBLK_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /* Free full lines directly as GC has not been started yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) pblk_gc_free_full_lines(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if (!line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /* Configure next line for user data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) line = pblk_line_get_first_data(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (!line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) sector_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct ppa_addr ppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) size_t map_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) map_size = pblk_trans_map_size(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) if (!pblk->trans_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) pblk_ppa_set_empty(&ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) for (i = 0; i < pblk->capacity; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) pblk_trans_map_set(pblk, i, ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) ret = pblk_l2p_recover(pblk, factory_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) vfree(pblk->trans_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
/* Sanity-check and free the write (ring) buffer on teardown */
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_free(&pblk->rwb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static int pblk_rwb_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) unsigned long buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) int pgs_in_buffer, threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) threshold = geo->mw_cunits * geo->all_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * geo->all_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) buffer_size = write_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) buffer_size = pgs_in_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
/*
 * Build pblk's OCSSD 1.2 physical address format from the device
 * geometry. Channel and LUN widths are recomputed for the configured
 * counts; the remaining field widths are copied from the device format.
 *
 * Return: total number of address bits on success, -EINVAL when the
 * channel or LUN count is not a power of two.
 */
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	/* Pack fields contiguously from bit 0: sec|pln|ch|lun|pg|blk */
	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	/* One extraction mask per field, matching the offsets above */
	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
/*
 * Build pblk's OCSSD 2.0 address format (adst) and the user-facing
 * striping description (udst) from the device geometry.
 *
 * Return: total number of address bits.
 */
static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	/* Channel/LUN widths sized for the configured counts; chunk and
	 * sector widths taken from the device format.
	 */
	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	/* Pack fields contiguously from bit 0: sec|ch|lun|chk */
	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	/* Extraction masks matching the offsets above */
	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	/* Striping: ws_opt sectors per write unit across all ch/luns */
	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static int pblk_set_addrf(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) int mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) switch (geo->version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) case NVM_OCSSD_SPEC_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) pblk_err(pblk, "bad configuration of sectors/pages\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) (void *)&pblk->addrf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) case NVM_OCSSD_SPEC_20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) &pblk->uaddrf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) pblk_err(pblk, "OCSSD revision not supported (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) geo->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static int pblk_create_global_caches(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) sizeof(struct pblk_line_ws), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (!pblk_caches.ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) pblk_caches.rec = kmem_cache_create("pblk_rec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) sizeof(struct pblk_rec_ctx), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (!pblk_caches.rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) goto fail_destroy_ws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (!pblk_caches.g_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) goto fail_destroy_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) if (!pblk_caches.w_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) goto fail_destroy_g_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) fail_destroy_g_rq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) kmem_cache_destroy(pblk_caches.g_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) fail_destroy_rec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) kmem_cache_destroy(pblk_caches.rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) fail_destroy_ws:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) kmem_cache_destroy(pblk_caches.ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int pblk_get_global_caches(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) mutex_lock(&pblk_caches.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (kref_get_unless_zero(&pblk_caches.kref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) ret = pblk_create_global_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) kref_init(&pblk_caches.kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) mutex_unlock(&pblk_caches.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static void pblk_destroy_global_caches(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct pblk_global_caches *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) c = container_of(ref, struct pblk_global_caches, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) kmem_cache_destroy(c->ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) kmem_cache_destroy(c->rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) kmem_cache_destroy(c->g_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) kmem_cache_destroy(c->w_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
/* Drop a reference on the shared caches; last put destroys them */
static void pblk_put_global_caches(void)
{
	mutex_lock(&pblk_caches.mutex);
	kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
	mutex_unlock(&pblk_caches.mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static int pblk_core_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) int ret, max_write_ppas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) atomic64_set(&pblk->user_wa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) atomic64_set(&pblk->pad_wa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) atomic64_set(&pblk->gc_wa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) pblk->user_rst_wa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) pblk->pad_rst_wa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) pblk->gc_rst_wa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) atomic64_set(&pblk->nr_flush, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) pblk->nr_flush_rst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) pblk->min_write_pgs = geo->ws_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) pblk->min_write_pgs_data = pblk->min_write_pgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) max_write_ppas = pblk->min_write_pgs * geo->all_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) pblk->oob_meta_size = geo->sos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (!pblk_is_oob_meta_supported(pblk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) /* For drives which does not have OOB metadata feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * in order to support recovery feature we need to use
 * so called packed metadata. Packed metadata will store
 * the same information as OOB metadata (l2p table mapping),
 * but in the form of the single page at the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * every write request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (pblk->min_write_pgs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) /* We want to keep all the packed metadata on single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * page per write requests. So we need to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * it will fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * This is more like sanity check, since there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * no device with such a big minimal write size
 * (above 1 megabyte).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) pblk_err(pblk, "Not supported min write size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /* For packed meta approach we do some simplification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * On read path we always issue requests which size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * equal to max_write_pgs, with all pages filled with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * user payload except of last one page which will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * filled with packed metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) pblk->max_write_pgs = pblk->min_write_pgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (!pblk->pad_dist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (pblk_get_global_caches())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) goto fail_free_pad_dist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* Internal bios can be at most the sectors signaled by the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) goto free_global_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) pblk_caches.ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) goto free_page_bio_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) pblk_caches.rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) goto free_gen_ws_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) pblk_caches.g_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) goto free_rec_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) pblk_caches.g_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) goto free_r_rq_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) pblk_caches.w_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) goto free_e_rq_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) pblk->close_wq = alloc_workqueue("pblk-close-wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (!pblk->close_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) goto free_w_rq_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (!pblk->bb_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) goto free_close_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (!pblk->r_end_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) goto free_bb_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (pblk_set_addrf(pblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) goto free_r_end_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) INIT_LIST_HEAD(&pblk->compl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) INIT_LIST_HEAD(&pblk->resubmit_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) free_r_end_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) destroy_workqueue(pblk->r_end_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) free_bb_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) destroy_workqueue(pblk->bb_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) free_close_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) destroy_workqueue(pblk->close_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) free_w_rq_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) mempool_exit(&pblk->w_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) free_e_rq_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) mempool_exit(&pblk->e_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) free_r_rq_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) mempool_exit(&pblk->r_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) free_rec_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) mempool_exit(&pblk->rec_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) free_gen_ws_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) mempool_exit(&pblk->gen_ws_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) free_page_bio_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) mempool_exit(&pblk->page_bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) free_global_caches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) pblk_put_global_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) fail_free_pad_dist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) kfree(pblk->pad_dist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static void pblk_core_free(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (pblk->close_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) destroy_workqueue(pblk->close_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (pblk->r_end_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) destroy_workqueue(pblk->r_end_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (pblk->bb_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) destroy_workqueue(pblk->bb_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) mempool_exit(&pblk->page_bio_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) mempool_exit(&pblk->gen_ws_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) mempool_exit(&pblk->rec_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) mempool_exit(&pblk->r_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) mempool_exit(&pblk->e_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) mempool_exit(&pblk->w_rq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) pblk_put_global_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) kfree(pblk->pad_dist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) static void pblk_line_mg_free(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) kfree(l_mg->bb_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) kfree(l_mg->bb_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) kfree(l_mg->vsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) for (i = 0; i < PBLK_DATA_LINES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) kfree(l_mg->sline_meta[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) kvfree(l_mg->eline_meta[i]->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) kfree(l_mg->eline_meta[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) mempool_destroy(l_mg->bitmap_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) kmem_cache_destroy(l_mg->bitmap_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct pblk_line *line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) kfree(line->blk_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) kfree(line->erase_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) kfree(line->chks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) kvfree(w_err_gc->lba_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) kfree(w_err_gc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) static void pblk_lines_free(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct pblk_line *line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) for (i = 0; i < l_mg->nr_lines; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) line = &pblk->lines[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) pblk_line_free(line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) pblk_line_meta_free(l_mg, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) pblk_line_mg_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) kfree(pblk->luns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) kfree(pblk->lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) static int pblk_luns_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) struct pblk_lun *rlun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /* TODO: Implement unbalanced LUN support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (geo->num_lun < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) pblk_err(pblk, "unbalanced LUN config.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!pblk->luns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) for (i = 0; i < geo->all_luns; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /* Stripe across channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) int ch = i % geo->num_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) int lun_raw = i / geo->num_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) int lunid = lun_raw + ch * geo->num_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) rlun = &pblk->luns[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) rlun->bppa = dev->luns[lunid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) sema_init(&rlun->wr_sem, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /* See comment over struct line_emeta definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) static unsigned int calc_emeta_len(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /* Round to sector size so that lba_list starts on its own sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) lm->emeta_sec[1] = DIV_ROUND_UP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) sizeof(struct line_emeta) + lm->blk_bitmap_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) sizeof(struct wa_counters), geo->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /* Round to sector size so that vsc_list starts on its own sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) geo->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) geo->csecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) sector_t provisioned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int sec_meta, blk_meta, clba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) int minimum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (geo->op == NVM_TARGET_DEFAULT_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) pblk->op = PBLK_DEFAULT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) pblk->op = geo->op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) minimum = pblk_get_min_chks(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) provisioned = nr_free_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) provisioned *= (100 - pblk->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) sector_div(provisioned, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if ((nr_free_chks - provisioned) < minimum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (geo->op != NVM_TARGET_DEFAULT_OP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) pblk_err(pblk, "OP too small to create a sane instance\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /* If the user did not specify an OP value, and PBLK_DEFAULT_OP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * is not enough, calculate and set sane value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) provisioned = nr_free_chks - minimum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) pblk->op = (100 * minimum) / nr_free_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) pblk->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) pblk->op_blks = nr_free_chks - provisioned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* Internally pblk manages all free blocks, but all calculations based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * on user capacity consider only provisioned blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) pblk->rl.total_blocks = nr_free_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* Consider sectors used for metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) pblk->capacity = (provisioned - blk_meta) * clba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) atomic_set(&pblk->rl.free_blocks, nr_free_chks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct nvm_chk_meta *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) int i, nr_bad_chks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) for (i = 0; i < lm->blk_per_line; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct pblk_lun *rlun = &pblk->luns[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct nvm_chk_meta *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct nvm_chk_meta *chunk_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct ppa_addr ppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) ppa = rlun->bppa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) pos = pblk_ppa_to_pos(geo, ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) chunk = &line->chks[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ppa.m.chk = line->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) chunk->state = chunk_meta->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) chunk->type = chunk_meta->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) chunk->wi = chunk_meta->wi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) chunk->slba = chunk_meta->slba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) chunk->cnlb = chunk_meta->cnlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) chunk->wp = chunk_meta->wp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) chunk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (!(chunk->state & NVM_CHK_ST_OFFLINE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) set_bit(pos, line->blk_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) nr_bad_chks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return nr_bad_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) void *chunk_meta, int line_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) long nr_bad_chks, chk_in_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) line->pblk = pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) line->id = line_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) line->type = PBLK_LINETYPE_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) line->state = PBLK_LINESTATE_NEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) line->gc_group = PBLK_LINEGC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) line->vsc = &l_mg->vsc_list[line_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) spin_lock_init(&line->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) chk_in_line = lm->blk_per_line - nr_bad_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) chk_in_line < lm->min_blk_line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) line->state = PBLK_LINESTATE_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) list_add_tail(&line->list, &l_mg->bad_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) atomic_set(&line->blk_in_line, chk_in_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) list_add_tail(&line->list, &l_mg->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) l_mg->nr_free_lines++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return chk_in_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (!line->blk_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (!line->erase_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) goto free_blk_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) line->chks = kmalloc_array(lm->blk_per_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) sizeof(struct nvm_chk_meta), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (!line->chks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) goto free_erase_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (!line->w_err_gc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto free_chks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) free_chks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) kfree(line->chks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) free_erase_bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) kfree(line->erase_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) free_blk_bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) kfree(line->blk_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static int pblk_line_mg_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int i, bb_distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) l_mg->nr_lines = geo->num_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) l_mg->log_line = l_mg->data_line = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) l_mg->nr_free_lines = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) INIT_LIST_HEAD(&l_mg->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) INIT_LIST_HEAD(&l_mg->corrupt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) INIT_LIST_HEAD(&l_mg->bad_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) INIT_LIST_HEAD(&l_mg->gc_full_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) INIT_LIST_HEAD(&l_mg->gc_high_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) INIT_LIST_HEAD(&l_mg->gc_mid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) INIT_LIST_HEAD(&l_mg->gc_low_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) INIT_LIST_HEAD(&l_mg->gc_empty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) INIT_LIST_HEAD(&l_mg->gc_werr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) INIT_LIST_HEAD(&l_mg->emeta_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) l_mg->gc_lists[0] = &l_mg->gc_werr_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) l_mg->gc_lists[1] = &l_mg->gc_high_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) l_mg->gc_lists[2] = &l_mg->gc_mid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) l_mg->gc_lists[3] = &l_mg->gc_low_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) spin_lock_init(&l_mg->free_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) spin_lock_init(&l_mg->close_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) spin_lock_init(&l_mg->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!l_mg->vsc_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (!l_mg->bb_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto fail_free_vsc_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!l_mg->bb_aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto fail_free_bb_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* smeta is always small enough to fit on a kmalloc memory allocation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * emeta depends on the number of LUNs allocated to the pblk instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) for (i = 0; i < PBLK_DATA_LINES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!l_mg->sline_meta[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto fail_free_smeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) lm->sec_bitmap_len, 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (!l_mg->bitmap_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) goto fail_free_smeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* the bitmap pool is used for both valid and map bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) l_mg->bitmap_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!l_mg->bitmap_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) goto fail_destroy_bitmap_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* emeta allocates three different buffers for managing metadata with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * in-memory and in-media layouts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) for (i = 0; i < PBLK_DATA_LINES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct pblk_emeta *emeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!emeta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto fail_free_emeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!emeta->buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) kfree(emeta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) goto fail_free_emeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) emeta->nr_entries = lm->emeta_sec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) l_mg->eline_meta[i] = emeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) for (i = 0; i < l_mg->nr_lines; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bb_distance = (geo->all_luns) * geo->ws_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) for (i = 0; i < lm->sec_per_line; i += bb_distance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) bitmap_set(l_mg->bb_template, i, geo->ws_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) fail_free_emeta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kvfree(l_mg->eline_meta[i]->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) kfree(l_mg->eline_meta[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) mempool_destroy(l_mg->bitmap_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) fail_destroy_bitmap_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) kmem_cache_destroy(l_mg->bitmap_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) fail_free_smeta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) for (i = 0; i < PBLK_DATA_LINES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) kfree(l_mg->sline_meta[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) kfree(l_mg->bb_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) fail_free_bb_template:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) kfree(l_mg->bb_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) fail_free_vsc_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) kfree(l_mg->vsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int pblk_line_meta_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct nvm_tgt_dev *dev = pblk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct pblk_line_meta *lm = &pblk->lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) unsigned int smeta_len, emeta_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) lm->sec_per_line = geo->clba * geo->all_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) lm->blk_per_line = geo->all_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) lm->mid_thrs = lm->sec_per_line / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) lm->high_thrs = lm->sec_per_line / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Calculate necessary pages for smeta. See comment over struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * line_smeta definition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) add_smeta_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) lm->smeta_sec = i * geo->ws_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) lm->smeta_len = lm->smeta_sec * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (smeta_len > lm->smeta_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) goto add_smeta_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Calculate necessary pages for emeta. See comment over struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * line_emeta definition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) add_emeta_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) lm->emeta_sec[0] = i * geo->ws_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) emeta_len = calc_emeta_len(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (emeta_len > lm->emeta_len[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) goto add_emeta_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) lm->min_blk_line = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (geo->all_luns > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) lm->emeta_sec[0], geo->clba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (lm->min_blk_line > lm->blk_per_line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) lm->blk_per_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static int pblk_lines_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct pblk_line_mgmt *l_mg = &pblk->l_mg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct pblk_line *line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) void *chunk_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int nr_free_chks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ret = pblk_line_meta_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ret = pblk_line_mg_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ret = pblk_luns_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto fail_free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) chunk_meta = pblk_get_chunk_meta(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (IS_ERR(chunk_meta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ret = PTR_ERR(chunk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) goto fail_free_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!pblk->lines) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto fail_free_chunk_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) for (i = 0; i < l_mg->nr_lines; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) line = &pblk->lines[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ret = pblk_alloc_line_meta(pblk, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) goto fail_free_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) trace_pblk_line_state(pblk_disk_name(pblk), line->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) line->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!nr_free_chks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto fail_free_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = pblk_set_provision(pblk, nr_free_chks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto fail_free_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) vfree(chunk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) fail_free_lines:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) pblk_line_meta_free(l_mg, &pblk->lines[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) kfree(pblk->lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) fail_free_chunk_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) vfree(chunk_meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) fail_free_luns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) kfree(pblk->luns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) fail_free_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) pblk_line_mg_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static int pblk_writer_init(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (IS_ERR(pblk->writer_ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int err = PTR_ERR(pblk->writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (err != -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) pblk_err(pblk, "could not allocate writer kthread (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void pblk_writer_stop(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* The pipeline must be stopped and the write buffer emptied before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * write thread is stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) WARN(pblk_rb_read_count(&pblk->rwb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) "Stopping not fully persisted write buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) WARN(pblk_rb_sync_count(&pblk->rwb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "Stopping not fully synced write buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) del_timer_sync(&pblk->wtimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (pblk->writer_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) kthread_stop(pblk->writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void pblk_free(struct pblk *pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pblk_lines_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) pblk_l2p_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) pblk_rwb_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pblk_core_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) kfree(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void pblk_tear_down(struct pblk *pblk, bool graceful)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (graceful)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) __pblk_pipeline_flush(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) __pblk_pipeline_stop(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) pblk_writer_stop(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pblk_rb_sync_l2p(&pblk->rwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) pblk_rl_free(&pblk->rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static void pblk_exit(void *private, bool graceful)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct pblk *pblk = private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) pblk_gc_exit(pblk, graceful);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) pblk_tear_down(pblk, graceful);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #ifdef CONFIG_NVM_PBLK_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) pblk_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static sector_t pblk_capacity(void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct pblk *pblk = private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return pblk->capacity * NR_PHY_IN_LOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct nvm_geo *geo = &dev->geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct request_queue *bqueue = dev->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct request_queue *tqueue = tdisk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct pblk *pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pblk->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) pblk->disk = tdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) pblk->state = PBLK_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) trace_pblk_state(pblk_disk_name(pblk), pblk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) pblk->gc.gc_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!(geo->version == NVM_OCSSD_SPEC_12 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) geo->version == NVM_OCSSD_SPEC_20)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) pblk_err(pblk, "OCSSD version not supported (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) geo->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) kfree(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (geo->ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) pblk_err(pblk, "extended metadata not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) kfree(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) spin_lock_init(&pblk->resubmit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) spin_lock_init(&pblk->trans_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) spin_lock_init(&pblk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #ifdef CONFIG_NVM_PBLK_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) atomic_long_set(&pblk->inflight_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) atomic_long_set(&pblk->padded_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) atomic_long_set(&pblk->padded_wb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) atomic_long_set(&pblk->req_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) atomic_long_set(&pblk->sub_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) atomic_long_set(&pblk->sync_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) atomic_long_set(&pblk->inflight_reads, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) atomic_long_set(&pblk->cache_reads, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) atomic_long_set(&pblk->sync_reads, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) atomic_long_set(&pblk->recov_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) atomic_long_set(&pblk->recov_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) atomic_long_set(&pblk->recov_gc_writes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) atomic_long_set(&pblk->recov_gc_reads, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) atomic_long_set(&pblk->read_failed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) atomic_long_set(&pblk->read_empty, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) atomic_long_set(&pblk->read_high_ecc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) atomic_long_set(&pblk->read_failed_gc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) atomic_long_set(&pblk->write_failed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) atomic_long_set(&pblk->erase_failed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ret = pblk_core_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) pblk_err(pblk, "could not initialize core\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ret = pblk_lines_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) pblk_err(pblk, "could not initialize lines\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) goto fail_free_core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ret = pblk_rwb_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) pblk_err(pblk, "could not initialize write buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto fail_free_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) pblk_err(pblk, "could not initialize maps\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) goto fail_free_rwb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ret = pblk_writer_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (ret != -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) pblk_err(pblk, "could not initialize write thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) goto fail_free_l2p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) ret = pblk_gc_init(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) pblk_err(pblk, "could not initialize gc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto fail_stop_writer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* inherit the size from the underlying device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) blk_queue_write_cache(tqueue, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) tqueue->limits.discard_granularity = geo->clba * geo->csecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) tqueue->limits.discard_alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) geo->all_luns, pblk->l_mg.nr_lines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) (unsigned long long)pblk->capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) pblk->rwb.nr_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) wake_up_process(pblk->writer_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* Check if we need to start GC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) pblk_gc_should_kick(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) fail_stop_writer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) pblk_writer_stop(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) fail_free_l2p:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) pblk_l2p_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) fail_free_rwb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) pblk_rwb_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) fail_free_lines:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pblk_lines_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) fail_free_core:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) pblk_core_free(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) kfree(pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* physical block device target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static struct nvm_tgt_type tt_pblk = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .name = "pblk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) .version = {1, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .bops = &pblk_bops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) .capacity = pblk_capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .init = pblk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .exit = pblk_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .sysfs_init = pblk_sysfs_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) .sysfs_exit = pblk_sysfs_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static int __init pblk_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) ret = nvm_register_tgt_type(&tt_pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) bioset_exit(&pblk_bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void pblk_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) bioset_exit(&pblk_bio_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) nvm_unregister_tgt_type(&tt_pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) module_init(pblk_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) module_exit(pblk_module_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");