/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};
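
/*
 * Illustrative sketch (call site assumed, not taken from this file):
 * NVDIMM_IO_ATOMIC is passed down the namespace rw_bytes() path when the
 * caller cannot sleep, e.g. via the helpers in <linux/nd.h>:
 *
 *	rc = nvdimm_write_bytes(ndns, offset, buf, size, NVDIMM_IO_ATOMIC);
 */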

struct nvdimm_drvdata {
	struct device *dev;
	int nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
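
/*
 * Worked example (values assumed): with hints_shift == 2 each DIMM owns
 * num == 4 flush hint slots, so for dimm == 1 and hint == 5 both helpers
 * address slot 1 * 4 + (5 & 3) == 5, i.e. the hint wraps within the
 * per-DIMM window:
 *
 *	void __iomem *wpq = ndrd_get_flush_wpq(ndrd, 1, 5);
 */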

static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))
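
/*
 * Example use (sketch; field name assumed to exist in the larger label
 * formats defined in label.h): only touch a member when the DIMM's label
 * size is big enough to contain it:
 *
 *	if (namespace_label_has(ndd, checksum))
 *		... validate/update the label checksum ...
 */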

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
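
/*
 * Example use (illustrative sketch; @label_id assumed): walk a DIMM's
 * allocated DPA ranges, using the _safe variant when entries may be
 * released during the walk:
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		if (strcmp(res->name, label_id->id) == 0)
 *			nvdimm_free_dpa(ndd, res);
 */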

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};
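
/*
 * Example (sketch): BTT/pmem submission paths bracket media access with a
 * per-cpu lane, using the helpers declared in <linux/libnvdimm.h>:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... issue I/O on behalf of this lane ...
 *	nd_region_release_lane(nd_region, lane);
 */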

enum nd_label_flags {
	ND_LABEL_REAP,
};
struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(); all other nd_mapping-to-ndd
	 * conversions use to_ndd(), which respects the enabled state
	 * of the nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};
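
/*
 * Example (sketch, call site assumed): the lock classes above let label
 * scanning take a second nd_mapping->lock without tripping lockdep:
 *
 *	mutex_lock_nested(&nd_mapping->lock, ND_MAPPING_UUID_SCAN);
 *	list_for_each_entry(label_ent, &nd_mapping->labels, list)
 *		...;
 *	mutex_unlock(&nd_mapping->lock);
 */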

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	unsigned long align;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
	struct nd_mapping mapping[];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Look up the next value in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
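
/*
 * Worked example: valid sequence numbers cycle 1 -> 2 -> 3 -> 1, so
 * nd_inc_seq(1) == 2, nd_inc_seq(2) == 3, and nd_inc_seq(3) == 1; the
 * invalid value 0 maps back to 0.
 */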

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

static inline u32 nd_info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}
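
/*
 * Worked example: with 4K pages ALIGN(SZ_8K, PAGE_SIZE) stays 8K; with
 * 64K pages the reserve rounds up to a full 64K page.
 */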

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
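
/*
 * Layout note / example (sketch, cast assumed): struct nd_gen_sb pads to a
 * full 4K info block with the checksum as its final 8 bytes, so BTT/PFN
 * superblocks of the same size can be passed for checksum handling:
 *
 *	u64 sum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 */
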
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#define MAX_NVDIMM_ALIGN 4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
struct range;
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size);
void devm_namespace_disable(struct device *dev,
		struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
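
/*
 * Example use (sketch): reject I/O that overlaps known poison before
 * touching media; @len is in bytes and is converted to 512-byte sectors
 * internally:
 *
 *	if (is_bad_pmem(&nd_region->bb, sector, PAGE_SIZE))
 *		return -EIO;
 */
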
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */