// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

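/*
 * Index blocks carry a small cyclic sequence number (masked by
 * NSINDEX_SEQ_MASK). A value of 0 marks an index block that has never been
 * written; valid values cycle 1 -> 2 -> 3 -> 1 via nd_inc_seq().
 * best_seq() returns whichever of two sequence numbers is "newer" under
 * that cyclic ordering.
 */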
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

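/*
 * An index block is a fixed header plus a free-slot bitmap (one bit per
 * label slot), rounded up to a multiple of NSINDEX_ALIGN.
 */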
static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

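/*
 * Worked example (hypothetical numbers): with a 128KiB label storage area
 * and 256-byte labels, tmp_nslot = 131072 / 256 = 512. Assuming the index
 * header plus a 512-bit free bitmap fits in one 256-byte NSINDEX_ALIGN
 * unit, the two index blocks consume 512 bytes and
 * (131072 - 512) / 256 = 510 label slots remain.
 */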
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media, the label format consists of two index blocks followed
	 * by an array of labels. None of these structures are ever
	 * updated in place. A sequence number tracks the currently
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

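/*
 * In the on-media free bitmap a set bit means "slot is free", so iterating
 * the clear bits visits the slots that currently hold (potentially) active
 * labels.
 */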
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

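/*
 * Generate the key used to track DPA allocations for a namespace, e.g.
 * "pmem-03a1dd98-6f4c-4a62-9d0e-8f5c2e1b7c11" (the UUID shown here is only
 * an illustrative placeholder). Local (BLK) namespaces get a "blk-" prefix
 * instead.
 */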
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

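/*
 * A slot is considered valid if the label records the slot number it was
 * written to and, for label formats that include a checksum (v1.2+,
 * 256-byte labels), the Fletcher64 checksum verifies. Pre-1.2 128-byte
 * labels carry no checksum field, so only the slot check applies.
 */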
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
					slot, sum);
			return false;
		}
	}

	return true;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two index blocks and two
	 * labels then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
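	/*
	 * Worked example with hypothetical numbers: config_size = 131072 and
	 * max_xfer = 5000 would take DIV_ROUND_UP(131072, 5000) = 27 reads
	 * with (5000 - 1) - (131072 - 1) % 5000 = 3928 wasted bytes in the
	 * last read. Trimming by 3928 / 27 = 145 gives max_xfer = 4855,
	 * which still needs 27 reads (27 * 4855 = 131085) but wastes only
	 * 13 bytes.
	 */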
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
				DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
				read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

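/*
 * Reserve a free slot in the in-memory copy of the "next" index. The
 * on-media free bitmap is only updated when that index is later written
 * out via nd_label_write_index().
 */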
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

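/*
 * Write out one namespace index block. With ND_NSINDEX_INIT the block is
 * (re)initialized from scratch with all label slots marked free; otherwise
 * the freshly written "next" index becomes the new "current" and the roles
 * of the two index blocks are swapped.
 */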
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

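/*
 * Write an updated PMEM namespace label for one DIMM mapping: allocate a
 * free slot in the staging ("next") index, write the new label there, mark
 * any superseded label entries for this UUID as free, and finally write the
 * staging index so that it becomes the current one.
 */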
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) list_for_each_entry(label_ent, &nd_mapping->labels, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (!label_ent->label) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) label_ent->label = nd_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) nd_label = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) "failed to track label: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) to_slot(ndd, nd_label));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (nd_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
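/*
 * Return true if @res is already accounted for in the previous resource
 * list, i.e. it is covered by an existing on-dimm label. Resources that
 * were adjusted since the last update always need a new label.
 */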
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static bool is_old_resource(struct resource *res, struct resource **list, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (res->flags & DPA_RESOURCE_ADJUSTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (res == list[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
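/*
 * Look up the DPA resource whose start and size exactly match the dpa
 * and rawsize recorded in @nd_label, or NULL if no such resource exists.
 */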
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static struct resource *to_resource(struct nvdimm_drvdata *ndd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct nd_namespace_label *nd_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) for_each_dpa_resource(ndd, res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (res->start != __le64_to_cpu(nd_label->dpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * 1/ Account all the labels that can be freed after this update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * 2/ Allocate and write the label to the staging (next) index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * 3/ Record the resources in the namespace device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static int __blk_label_update(struct nd_region *nd_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int num_labels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct nd_interleave_set *nd_set = nd_region->nd_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct nd_namespace_common *ndns = &nsblk->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct nd_namespace_label *nd_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct nd_label_ent *label_ent, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct nd_namespace_index *nsindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned long *free, *victim_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct resource *res, **old_res_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct nd_label_id label_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u8 uuid[NSLABEL_UUID_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int min_dpa_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) u32 nslot, slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!preamble_next(ndd, &nsindex, &free, &nslot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) old_res_list = nsblk->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) nfree = nd_label_nfree(ndd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) old_num_resources = nsblk->num_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * We need to loop over the old resources a few times, which seems a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * bit inefficient, but we need to know that we have the label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * space before we start mutating the tracking structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * Otherwise the recovery method of last resort for userspace is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * disable and re-enable the parent region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) for_each_dpa_resource(ndd, res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (strcmp(res->name, label_id.id) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (!is_old_resource(res, old_res_list, old_num_resources))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) alloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) victims = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (old_num_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* convert old local-label-map to dimm-slot victim-map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!victim_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* mark unused labels for garbage collection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) for_each_clear_bit_le(slot, free, nslot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) nd_label = to_label(ndd, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) res = to_resource(ndd, nd_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (res && is_old_resource(res, old_res_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) old_num_resources))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) slot = to_slot(ndd, nd_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) set_bit(slot, victim_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) victims++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* don't allow updates that consume the last label */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dev_info(&nsblk->common.dev, "insufficient label space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) bitmap_free(victim_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* from here on we need to abort on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* assign all resources to the namespace before writing the labels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) nsblk->res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) nsblk->num_resources = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) for_each_dpa_resource(ndd, res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (strcmp(res->name, label_id.id) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* release slots associated with any invalidated UUIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) reap_victim(nd_mapping, label_ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) list_move(&label_ent->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Find the resource associated with the first label in the set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * per the v1.2 namespace specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) for (i = 0; i < nsblk->num_resources; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct resource *min = nsblk->res[min_dpa_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) res = nsblk->res[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (res->start < min->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) min_dpa_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) for (i = 0; i < nsblk->num_resources; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) res = nsblk->res[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (is_old_resource(res, old_res_list, old_num_resources))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) continue; /* carry-over */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) slot = nd_label_alloc_slot(ndd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (slot == UINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dev_dbg(ndd->dev, "allocated: %d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) nd_label = to_label(ndd, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) memset(nd_label, 0, sizeof_namespace_label(ndd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (nsblk->alt_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) memcpy(nd_label->name, nsblk->alt_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) NSLABEL_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * Use the presence of the type_guid as a flag to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * determine isetcookie usage and nlabel + position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * policy for blk-aperture namespaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (namespace_label_has(ndd, type_guid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (i == min_dpa_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) nd_label->position = __cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) nd_label->nlabel = __cpu_to_le16(0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) nd_label->position = __cpu_to_le16(0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) nd_label->nlabel = __cpu_to_le16(0); /* N/A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) nd_label->position = __cpu_to_le16(0); /* N/A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) nd_label->dpa = __cpu_to_le64(res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) nd_label->rawsize = __cpu_to_le64(resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) nd_label->slot = __cpu_to_le32(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (namespace_label_has(ndd, type_guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) guid_copy(&nd_label->type_guid, &nd_set->type_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (namespace_label_has(ndd, abstraction_guid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) guid_copy(&nd_label->abstraction_guid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) to_abstraction_guid(ndns->claim_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) &nd_label->abstraction_guid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (namespace_label_has(ndd, checksum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u64 sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) nd_label->checksum = __cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sum = nd_fletcher64(nd_label,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sizeof_namespace_label(ndd), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) nd_label->checksum = __cpu_to_le64(sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* update label */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) offset = nd_label_offset(ndd, nd_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) rc = nvdimm_set_config_data(ndd, offset, nd_label,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) sizeof_namespace_label(ndd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* free up now unused slots in the new index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dev_dbg(ndd->dev, "free: %d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) nd_label_free_slot(ndd, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* update index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rc = nd_label_write_index(ndd, ndd->ns_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * Now that the on-dimm labels are up to date, fix up the tracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * entries in nd_mapping->labels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) nlabel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) nd_label = label_ent->label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (!nd_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) nlabel++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) nlabel--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) list_move(&label_ent->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) label_ent->label = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) list_splice_tail_init(&list, &nd_mapping->labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (nlabel + nsblk->num_resources > num_labels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Bug, we can't end up with more resources than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * available labels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) label_ent = list_first_entry_or_null(&nd_mapping->labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) typeof(*label_ent), list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (!label_ent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
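	/* pair each committed label for this namespace with an empty tracking entry */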
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) for_each_clear_bit_le(slot, free, nslot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) nd_label = to_label(ndd, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) res = to_resource(ndd, nd_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) res->flags &= ~DPA_RESOURCE_ADJUSTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (label_ent->label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) label_ent->label = nd_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) nd_label = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (nd_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dev_WARN(&nsblk->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) "failed to track label slot%d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) kfree(old_res_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) bitmap_free(victim_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * 1/ repair the allocated label bitmap in the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * 2/ restore the resource list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) kfree(nsblk->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) nsblk->res = old_res_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) nsblk->num_resources = old_num_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) old_res_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
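/*
 * Make sure the mapping tracks at least @num_labels label entries,
 * allocating placeholder entries as needed, and initialize both on-dimm
 * index blocks if no valid index has been selected yet. Returns the
 * number of tracked labels or a negative error code.
 */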
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) int i, old_num_labels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct nd_label_ent *label_ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct nd_namespace_index *nsindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) list_for_each_entry(label_ent, &nd_mapping->labels, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) old_num_labels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * We need to preserve all the old labels for the mapping so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * they can be garbage collected after writing the new labels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) for (i = old_num_labels; i < num_labels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!label_ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) list_add_tail(&label_ent->list, &nd_mapping->labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
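	/*
	 * If a valid index pair has already been selected there is nothing
	 * left to initialize; otherwise fall through and write the initial
	 * (empty) index blocks.
	 */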
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (ndd->ns_current == -1 || ndd->ns_next == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* pass */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return max(num_labels, old_num_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) nsindex = to_namespace_index(ndd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) memset(nsindex, 0, ndd->nsarea.config_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ndd->ns_next = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ndd->ns_current = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return max(num_labels, old_num_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
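/*
 * Release every label slot belonging to @uuid in this mapping, drop the
 * corresponding tracking entries, and commit the updated index. If no
 * active labels remain, the mapping's label list is freed entirely.
 */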
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct nd_label_ent *label_ent, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct nd_namespace_index *nsindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) u8 label_uuid[NSLABEL_UUID_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) unsigned long *free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) u32 nslot, slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) int active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (!uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* no index || no labels == nothing to delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!preamble_next(ndd, &nsindex, &free, &nslot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) mutex_lock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct nd_namespace_label *nd_label = label_ent->label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!nd_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) active--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) slot = to_slot(ndd, nd_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) nd_label_free_slot(ndd, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) dev_dbg(ndd->dev, "free: %d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) list_move_tail(&label_ent->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) label_ent->label = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) list_splice_tail_init(&list, &nd_mapping->labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (active == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) nd_mapping_free_labels(nd_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dev_dbg(ndd->dev, "no more active labels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) mutex_unlock(&nd_mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return nd_label_write_index(ndd, ndd->ns_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
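/*
 * Update the pmem namespace labels across all mappings in the region.
 * A size of zero deletes the labels. Otherwise the update is done in
 * two passes per the UEFI 2.7 expectations: each label is first written
 * with NSLABEL_FLAG_UPDATING set, then rewritten with the flag cleared
 * once every mapping has a consistent label.
 */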
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int nd_pmem_namespace_label_update(struct nd_region *nd_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct nd_namespace_pmem *nspm, resource_size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (i = 0; i < nd_region->ndr_mappings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct nd_mapping *nd_mapping = &nd_region->mapping[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) rc = del_labels(nd_mapping, nspm->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) for_each_dpa_resource(ndd, res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (strncmp(res->name, "pmem", 4) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) WARN_ON_ONCE(!count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) rc = init_labels(nd_mapping, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) NSLABEL_FLAG_UPDATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Clear the UPDATING flag per UEFI 2.7 expectations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) for (i = 0; i < nd_region->ndr_mappings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct nd_mapping *nd_mapping = &nd_region->mapping[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
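/*
 * Update the labels for a blk namespace. Blk-aperture namespaces are
 * local to a single dimm, so only mapping[0] is consulted; a size of
 * zero deletes the labels for the namespace uuid.
 */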
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int nd_blk_namespace_label_update(struct nd_region *nd_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct nd_namespace_blk *nsblk, resource_size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct nd_mapping *nd_mapping = &nd_region->mapping[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return del_labels(nd_mapping, nsblk->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) for_each_dpa_resource(to_ndd(nd_mapping), res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) count = init_labels(nd_mapping, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return __blk_label_update(nd_region, nd_mapping, nsblk, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
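/*
 * Parse the well-known claim-class GUIDs (btt, btt2, pfn, dax) once at
 * init so they can be compared against the abstraction GUIDs stored in
 * namespace labels.
 */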
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int __init nd_label_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }