Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/refcount.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
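
/*
 * Illustrative sketch, not part of the original header: these bits are
 * combined into the suspend_flags argument of dm_suspend() (declared in
 * <linux/device-mapper.h>), e.g.
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG);
 *
 * to freeze the filesystem on top of the device and requeue, rather than
 * flush, in-flight I/O while suspended.
 */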

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	refcount_t count;
	struct dm_dev *dm_dev;
};
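
/*
 * Illustrative sketch, not part of the original header: these entries are
 * linked on the list returned by dm_table_get_devices() below and can be
 * walked with the standard list helpers, e.g.
 *
 *	struct dm_dev_internal *dd;
 *
 *	list_for_each_entry(dd, dm_table_get_devices(t), list)
 *		pr_debug("table uses %s\n", dd->dm_dev->name);
 *
 * count tracks how many targets in the table reference the same dm_dev.
 */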

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
			   int *blocksize);
int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
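
/*
 * Illustrative sketch, not part of the original header: the checks above
 * look at which hooks a target_type fills in. A purely bio-based target
 * might register only a ->map method (hypothetical example_target shown):
 *
 *	static struct target_type example_target = {
 *		.name = "example",
 *		.map  = example_map,	// bio-based: dm_target_bio_based() is true
 *		// .clone_and_map_rq left unset => dm_target_request_based() is false
 *	};
 *
 * A hybrid target (e.g. multipath) fills in both hooks, so both checks,
 * and therefore dm_target_hybrid(), evaluate true.
 */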

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);
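
/*
 * Illustrative sketch, not part of the original header: dm_split_args()
 * tokenises a target parameter string in place and hands back an
 * allocated argv array that the caller must free, e.g.
 *
 *	char params[] = "/dev/sdb 0";
 *	char **argv;
 *	int argc;
 *
 *	if (!dm_split_args(&argc, &argv, params)) {
 *		// argc == 2, argv[0] == "/dev/sdb", argv[1] == "0"
 *		kfree(argv);
 *	}
 */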

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size,
					    unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif