^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright(c) 2016 Intel Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #ifndef __DAX_PRIVATE_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #define __DAX_PRIVATE_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/cdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
/* private routines between core files */
struct dax_device;
/* Look up the dax_device behind a character-device inode — presumably the
 * inverse of dax_inode(); implemented outside this header, verify in super.c.
 */
struct dax_device *inode_dax(struct inode *inode);
/* Return the inode associated with a dax_device. */
struct inode *dax_inode(struct dax_device *dax_dev);
/* Init/teardown of the dax bus type, paired calls for core module load/unload. */
int dax_bus_init(void);
void dax_bus_exit(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
/**
 * struct dax_region - mapping infrastructure for dax devices
 *
 * A dax_region is reference counted (@kref) and acts as the parent of
 * one or more dev_dax instances (see struct dev_dax's @region back-pointer).
 *
 * @id: kernel-wide unique region for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/**
 * struct dax_mapping - sysfs object for a mapped range of a dev_dax instance
 * @dev: device core; child device of a dev_dax
 * @range_id: identifies the dev_dax range this mapping belongs to —
 *            presumably an index into struct dev_dax.ranges (each
 *            dev_dax_range holds a back-pointer to its mapping); verify
 *            against the core files
 * @id: instance id, allocated from the parent dev_dax's @ida
 */
struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
/**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * data while the device is activated in the driver.
 * @region: parent region
 * @dax_dev: core dax functionality
 * @align: allocation and mapping alignment for the instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @id: ida allocated id
 * @ida: mapping id allocator
 * @dev: device core
 * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @nr_range: size of @ranges
 * @ranges: resource-span + pgoff tuples for the instance
 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;		/* instance-relative page offset of this range */
		struct range range;		/* resource span (physical address range) */
		struct dax_mapping *mapping;	/* sysfs mapping object, presumably NULL until created */
	} *ranges;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static inline struct dev_dax *to_dev_dax(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) return container_of(dev, struct dev_dax, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static inline struct dax_mapping *to_dax_mapping(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return container_of(dev, struct dax_mapping, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
/* Resolve an instance-relative pgoff (+ size) to a physical address via
 * @dev_dax->ranges — implemented in a core file; presumably returns an
 * error sentinel when no range covers the request, verify at the definition.
 */
phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static inline bool dax_align_valid(unsigned long align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (align == PMD_SIZE && has_transparent_hugepage())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (align == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static inline bool dax_align_valid(unsigned long align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return align == PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #endif