// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVDIMM Block Window Driver
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"

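/*
 * Helpers for decomposing the advertised LBA size: any bytes beyond the
 * 4096- or 512-byte data payload are treated as per-sector metadata, and
 * transfers are done on an internal LBA size rounded up to
 * INT_LBASIZE_ALIGNMENT.
 */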
static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
{
	return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
}

static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
{
	return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
}

static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
{
	return nsblk->lbasize - nsblk_meta_size(nsblk);
}

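/*
 * Translate a namespace-relative offset into an offset within the backing
 * DIMM aperture by walking the (possibly discontiguous) resource ranges
 * assigned to this namespace.  Requests that straddle a resource boundary
 * or fall outside all resources return SIZE_MAX.
 */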
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
		resource_size_t ns_offset, unsigned int len)
{
	int i;

	for (i = 0; i < nsblk->num_resources; i++) {
		if (ns_offset < resource_size(nsblk->res[i])) {
			if (ns_offset + len > resource_size(nsblk->res[i])) {
				dev_WARN_ONCE(&nsblk->common.dev, 1,
					"illegal request\n");
				return SIZE_MAX;
			}
			return nsblk->res[i]->start + ns_offset;
		}
		ns_offset -= resource_size(nsblk->res[i]);
	}

	dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
	return SIZE_MAX;
}

static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region;
	struct device *parent;

	parent = nsblk->common.dev.parent;
	nd_region = container_of(parent, struct nd_region, dev);
	return container_of(nd_region, struct nd_blk_region, nd_region);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
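/*
 * Read or write the integrity metadata for @lba.  The metadata occupies the
 * tail of the internal LBA, immediately after the data payload, hence the
 * sector_size offset into the LBA below.
 */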
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
		struct bio_integrity_payload *bip, u64 lba, int rw)
{
	struct nd_blk_region *ndbr = to_ndbr(nsblk);
	unsigned int len = nsblk_meta_size(nsblk);
	resource_size_t dev_offset, ns_offset;
	u32 internal_lbasize, sector_size;
	int err = 0;

	internal_lbasize = nsblk_internal_lbasize(nsblk);
	sector_size = nsblk_sector_size(nsblk);
	ns_offset = lba * internal_lbasize + sector_size;
	dev_offset = to_dev_offset(nsblk, ns_offset, len);
	if (dev_offset == SIZE_MAX)
		return -EIO;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *iobuf;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly.
		 */

		cur_len = min(len, bv.bv_len);
		iobuf = kmap_atomic(bv.bv_page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
				cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		len -= cur_len;
		dev_offset += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return err;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
		struct bio_integrity_payload *bip, u64 lba, int rw)
{
	return 0;
}
#endif

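/*
 * Transfer the data (and, when an integrity payload is present, the
 * per-sector metadata) for a single bio_vec.  With metadata the transfer is
 * split one internal LBA at a time; without it the whole bvec goes down in
 * one do_io() call.
 */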
static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
		struct bio_integrity_payload *bip, struct page *page,
		unsigned int len, unsigned int off, int rw, sector_t sector)
{
	struct nd_blk_region *ndbr = to_ndbr(nsblk);
	resource_size_t dev_offset, ns_offset;
	u32 internal_lbasize, sector_size;
	int err = 0;
	void *iobuf;
	u64 lba;

	internal_lbasize = nsblk_internal_lbasize(nsblk);
	sector_size = nsblk_sector_size(nsblk);
	while (len) {
		unsigned int cur_len;

		/*
		 * If we don't have an integrity payload, we don't have to
		 * split the bvec into sectors, as this would cause unnecessary
		 * Block Window setup/move steps. The do_io routine is capable
		 * of handling len <= PAGE_SIZE.
		 */
		cur_len = bip ? min(len, sector_size) : len;

		lba = div_u64(sector << SECTOR_SHIFT, sector_size);
		ns_offset = lba * internal_lbasize;
		dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
		if (dev_offset == SIZE_MAX)
			return -EIO;

		iobuf = kmap_atomic(page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		if (bip) {
			err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
			if (err)
				return err;
		}
		len -= cur_len;
		off += cur_len;
		sector += sector_size >> SECTOR_SHIFT;
	}

	return err;
}

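/* ->submit_bio() entry point: hand each bio segment to nsblk_do_bvec(). */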
static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	bip = bio_integrity(bio);
	rw = bio_data_dir(bio);
	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
				bvec.bv_offset, rw, iter.bi_sector);
		if (err) {
			dev_dbg(&nsblk->common.dev,
					"io error in %s sector %lld, len %d,\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

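/*
 * ->rw_bytes() back-end: byte-granular access to the namespace via the
 * region's do_io() routine, used by consumers of ndns->rw_bytes such as
 * the BTT.
 */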
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *iobuf, size_t n, int rw,
		unsigned long flags)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
	struct nd_blk_region *ndbr = to_ndbr(nsblk);
	resource_size_t dev_offset;

	dev_offset = to_dev_offset(nsblk, offset, n);

	if (unlikely(offset + n > nsblk->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (dev_offset == SIZE_MAX)
		return -EIO;

	return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
}

static const struct block_device_operations nd_blk_fops = {
	.owner = THIS_MODULE,
	.submit_bio = nd_blk_submit_bio,
};

static void nd_blk_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void nd_blk_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

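/*
 * Allocate the request queue and gendisk, register integrity support when
 * the namespace carries per-sector metadata, and publish the disk.  Both
 * the queue and the disk are torn down via devm actions on driver unbind.
 */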
static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
	struct device *dev = &nsblk->common.dev;
	resource_size_t available_disk_size;
	struct request_queue *q;
	struct gendisk *disk;
	u64 internal_nlba;

	internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
	available_disk_size = internal_nlba * nsblk_sector_size(nsblk);

	q = blk_alloc_queue(NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;
	if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
		return -ENOMEM;

	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	disk = alloc_disk(0);
	if (!disk)
		return -ENOMEM;

	disk->first_minor = 0;
	disk->fops = &nd_blk_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->private_data = nsblk;
	nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);

	if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
		return -ENOMEM;

	if (nsblk_meta_size(nsblk)) {
		int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));

		if (rc)
			return rc;
	}

	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
	device_add_disk(dev, disk, NULL);
	nvdimm_check_and_set_ro(disk);
	return 0;
}

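/*
 * If the device is a BTT, attach it; otherwise try to instantiate a BTT on
 * top of the namespace and, failing that, surface the namespace directly as
 * a block device.
 */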
static int nd_blk_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;
	struct nd_namespace_blk *nsblk;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsblk = to_nd_namespace_blk(&ndns->dev);
	nsblk->size = nvdimm_namespace_capacity(ndns);
	dev_set_drvdata(dev, nsblk);

	ndns->rw_bytes = nsblk_rw_bytes;
	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);
	else if (nd_btt_probe(dev, ndns) == 0) {
		/* we'll come back as btt-blk */
		return -ENXIO;
	} else
		return nsblk_attach_disk(nsblk);
}

static int nd_blk_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

static struct nd_device_driver nd_blk_driver = {
	.probe = nd_blk_probe,
	.remove = nd_blk_remove,
	.drv = {
		.name = "nd_blk",
	},
	.type = ND_DRIVER_NAMESPACE_BLK,
};

static int __init nd_blk_init(void)
{
	return nd_driver_register(&nd_blk_driver);
}

static void __exit nd_blk_exit(void)
{
	driver_unregister(&nd_blk_driver.drv);
}

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
module_init(nd_blk_init);
module_exit(nd_blk_exit);