// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

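/* per-cpu cursor for distributing writes across a region's flush hints */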
static DEFINE_PER_CPU(int, flush_idx);

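/*
 * Map a DIMM's write-pump-queue flush hint addresses for this region.
 * When two hints land in the same page, reuse the mapping created for
 * the earlier hint instead of ioremapping the page a second time.
 */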
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

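	/*
	 * Size each DIMM's slot in the flush table by the smallest
	 * non-zero hint count across the mappings (ilog2 rounds down to
	 * a power of two), so per-DIMM hint lookups use a uniform
	 * power-of-two stride.
	 */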
	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This prevents redundant
	 * flushes to the same flush hint.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	memregion_free(nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is also the 'nstype' attribute of a region, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, label;

		for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_LABELING, &nvdimm->flags))
				label++;
		}
		if (label)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static unsigned long long region_size(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		return nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		return nd_mapping->size;
	}

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

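	/*
	 * BLK capacity can alias the tail of a PMEM interleave set. If a
	 * pass observes a larger BLK overlap than previously assumed,
	 * restart the accounting with that worst-case overlap.
	 */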
retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

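	/*
	 * An interleaved PMEM allocation is limited by the mapping with
	 * the least contiguous free DPA; the per-DIMM minimum is scaled
	 * by the interleave width on the way out.
	 */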
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long val, dpa;
	u32 remainder;
	int rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	if (!nd_region->ndr_mappings)
		return -ENXIO;

	/*
	 * Ensure space-align is evenly divisible by the region
	 * interleave-width, because the kernel typically has no facility
	 * to determine which DIMM(s), i.e. dimm-physical-addresses, would
	 * contribute to the tail capacity in system-physical-address
	 * space for the namespace.
	 */
	dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
			|| val > region_size(nd_region) || remainder)
		return -EINVAL;

	/*
	 * Given that space allocation consults this value multiple
	 * times, ensure it does not change for the duration of the
	 * allocation.
	 */
	nvdimm_bus_lock(dev);
	nd_region->align = val;
	nvdimm_bus_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(align);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_align.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

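/*
 * Per-attribute visibility policy: seed, badblocks, and resource
 * attributes only make sense for memory (PMEM/volatile) regions,
 * deep_flush degrades to read-only when the region has no flush
 * hints, and set_cookie additionally requires an interleave set.
 */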
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr && !is_memory(dev))
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a == &dev_attr_align.attr)
		return a->mode;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

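/* format one mapping as "<dimm-name>,<dpa-start>,<size>,<position>" */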
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx)						\
static ssize_t mapping##idx##_show(struct device *dev,			\
		struct device_attribute *attr, char *buf)		\
{									\
	return mappingN(dev, buf, idx);					\
}									\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_region_attribute_group,
	&nd_numa_attribute_group,
	&nd_mapping_attribute_group,
	NULL,
};

static const struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct nd_namespace_index *nsindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct nd_interleave_set *nd_set = nd_region->nd_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!nd_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (nsindex && __le16_to_cpu(nsindex->major) == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) && __le16_to_cpu(nsindex->minor) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return nd_set->cookie1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return nd_set->cookie2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct nd_interleave_set *nd_set = nd_region->nd_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (nd_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return nd_set->altcookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
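
/*
 * Illustrative sketch (hypothetical caller, modeled on the label
 * validation done in namespace_devs.c): the cookie selected above is
 * compared against the isetcookie recorded in each namespace label,
 * falling back to the altcookie for labels written by older tooling:
 *
 *	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
 *	if (__le64_to_cpu(nd_label->isetcookie) != cookie
 *			&& __le64_to_cpu(nd_label->isetcookie)
 *				!= nd_region_interleave_set_altcookie(nd_region))
 *		... reject the label, the interleave set changed ...
 */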

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
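
/*
 * Caller pattern (illustrative; the lockdep assertion above enforces
 * it): the labels list is only torn down under the mapping lock, e.g.:
 *
 *	mutex_lock(&nd_mapping->lock);
 *	nd_mapping_free_labels(nd_mapping);
 *	mutex_unlock(&nd_mapping->lock);
 */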

/*
 * When a namespace is activated, create new seeds for the next
 * namespace, or namespace-personality, to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}
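
/*
 * Observable effect, sketched under an assumed sysfs layout: if
 * /sys/bus/nd/devices/region0/namespace_seed pointed at the namespace
 * that was just activated, a fresh idle namespace (e.g. namespace0.1)
 * is created and becomes the new seed, so userspace always has an
 * unconfigured device to claim for the next configuration step.
 */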

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per CPU. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	/* pairs with the get_cpu() in nd_region_acquire_lane() */
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
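
/*
 * Usage sketch (hypothetical BTT-style caller, assuming per-lane I/O
 * resources indexed by the returned lane):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... issue I/O through the lane-indexed resources ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Acquire and release must happen on the same task: preemption stays
 * disabled for the duration of the lane hold.
 */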

/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
	unsigned long align;
	int i, mappings;
	u32 remainder;

	if (is_nd_blk(&nd_region->dev))
		align = PAGE_SIZE;
	else
		align = MEMREMAP_COMPAT_ALIGN_MAX;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
			align = MEMREMAP_COMPAT_ALIGN_MAX;
			break;
		}
	}

	if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
		align = PAGE_SIZE;

	mappings = max_t(u16, 1, nd_region->ndr_mappings);
	div_u64_rem(align, mappings, &remainder);
	if (remainder)
		align *= mappings;

	return align;
}
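
/*
 * Worked example (illustrative numbers): a 3-way interleaved pmem
 * region starts from align = SZ_16M; 16M % 3 leaves a remainder, so
 * the alignment is scaled to 48M so that it divides evenly across the
 * three mappings. A 2- or 4-way interleave keeps 16M unchanged since
 * the remainder is already zero.
 */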

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc,
		const struct device_type *dev_type, const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % PAGE_SIZE) {
			dev_err(&nvdimm_bus->dev,
				"%s: %s mapping%d is not %ld aligned\n",
				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(struct_size(nd_region, mapping,
						ndr_desc->num_mappings),
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = memregion_alloc(GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_region->align = default_align(nd_region);
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

err_percpu:
	memregion_free(nd_region->id);
err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The pmem_wmb() is needed to 'sfence' all previous writes such
	 * that they are architecturally visible for the platform buffer
	 * flush. Note that we've already arranged for pmem writes to
	 * avoid the cache via memcpy_flushcache(). The final wmb()
	 * ensures ordering for the NVDIMM flush write.
	 */
	pmem_wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}
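
/*
 * Usage sketch (hypothetical block-driver flush path, mirroring how a
 * pmem driver might honor a REQ_PREFLUSH bio):
 *
 *	if (bio->bi_opf & REQ_PREFLUSH)
 *		if (nvdimm_flush(nd_region, bio) < 0)
 *			bio->bi_status = BLK_STS_IOERR;
 */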

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	/* Test if an explicit flush function is defined */
	if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
		return 1;

	/* Test if any flush hints for the region are available */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints or an explicit
	 * flush, so assume a platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
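
/*
 * Interpretation sketch (hypothetical caller deciding whether to
 * advertise a volatile write cache):
 *
 *	int has_flush = nvdimm_has_flush(nd_region);
 *
 *	if (has_flush < 0)
 *		... capability unknown, be conservative ...
 *	else if (has_flush)
 *		... writes need an explicit nvdimm_flush() ...
 *	else
 *		... platform persistence (e.g. ADR) covers writes ...
 */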

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	if (is_nd_volatile(&nd_region->dev))
		return true;

	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);
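
/*
 * Note (assumption, based on how async regions such as virtio-pmem
 * behave): is_nvdimm_sync() is what lets a dax path refuse MAP_SYNC
 * mappings on regions whose persistence depends on an asynchronous
 * host-side flush rather than on CPU-visible ordering alone.
 */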

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
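
/*
 * Usage sketch (hypothetical caller sizing a reservation that may
 * spill past its parent region; device_for_each_child() propagates
 * the callback's -EBUSY):
 *
 *	if (nd_region_conflict(nd_region, start, size) == -EBUSY)
 *		... [start, start + size) overlaps a sibling memory
 *		    region, pick a smaller size ...
 */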