Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_SUPER_H
#define _FS_CEPH_SUPER_H

#include <linux/ceph/ceph_debug.h>

#include <asm/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/mempool.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/refcount.h>
#include <linux/security.h>

#include <linux/ceph/libceph.h>

#ifdef CONFIG_CEPH_FSCACHE
#include <linux/fscache.h>
#endif

/* f_type in struct statfs */
#define CEPH_SUPER_MAGIC 0x00c36400

/* large granularity for statfs utilization stats to facilitate
 * large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT   22  /* 4 MB */
#define CEPH_BLOCK         (1 << CEPH_BLOCK_SHIFT)
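/*
 * Editorial note (worked example, not in the original header): with
 * CEPH_BLOCK_SHIFT = 22, CEPH_BLOCK is 1 << 22 bytes = 4 MiB, so e.g. a
 * 16 TiB (2^44 byte) volume is reported to statfs as 2^44 / 2^22 =
 * 4,194,304 blocks, which still fits comfortably in a 32-bit block count.
 */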

#define CEPH_MOUNT_OPT_CLEANRECOVER    (1<<1) /* auto reconnect (clean mode) after blocklisted */
#define CEPH_MOUNT_OPT_DIRSTAT         (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES          (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR  (1<<7) /* no dcache readdir */
#define CEPH_MOUNT_OPT_INO32           (1<<8) /* 32 bit inos */
#define CEPH_MOUNT_OPT_DCACHE          (1<<9) /* use dcache for readdir etc */
#define CEPH_MOUNT_OPT_FSCACHE         (1<<10) /* use fscache */
#define CEPH_MOUNT_OPT_NOPOOLPERM      (1<<11) /* no pool permission check */
#define CEPH_MOUNT_OPT_MOUNTWAIT       (1<<12) /* mount waits if no mds is up */
#define CEPH_MOUNT_OPT_NOQUOTADF       (1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM      (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS    (1<<15) /* allow async directory ops */

#define CEPH_MOUNT_OPT_DEFAULT			\
	(CEPH_MOUNT_OPT_DCACHE |		\
	 CEPH_MOUNT_OPT_NOCOPYFROM)

#define ceph_set_mount_opt(fsc, opt) \
	(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
#define ceph_clear_mount_opt(fsc, opt) \
	(fsc)->mount_options->flags &= ~CEPH_MOUNT_OPT_##opt
#define ceph_test_mount_opt(fsc, opt) \
	(!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
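/*
 * Editorial sketch (illustrative only, not part of the original header):
 * the opt argument is token-pasted onto CEPH_MOUNT_OPT_, so for example
 *
 *	ceph_set_mount_opt(fsc, NOCOPYFROM);
 *	if (ceph_test_mount_opt(fsc, DCACHE))
 *		ceph_clear_mount_opt(fsc, DCACHE);
 *
 * manipulates fsc->mount_options->flags using CEPH_MOUNT_OPT_NOCOPYFROM
 * and CEPH_MOUNT_OPT_DCACHE.
 */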

/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE              CEPH_MSG_MAX_DATA_LEN
/* osd has a configurable limitation of max write size.
 * CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE		CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT             (8192*1024)    /* max readahead */
#define CEPH_MAX_READDIR_DEFAULT        1024
#define CEPH_MAX_READDIR_BYTES_DEFAULT  (512*1024)
#define CEPH_SNAPDIRNAME_DEFAULT        ".snap"

/*
 * Delay telling the MDS we no longer want caps, in case we reopen
 * the file.  Delay a minimum amount of time, even if we send a cap
 * message for some other reason.  Otherwise, take the opportunity to
 * update the mds to avoid sending another message later.
 */
#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT      5  /* cap release delay */
#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */

struct ceph_mount_options {
	unsigned int flags;

	unsigned int wsize;            /* max write size */
	unsigned int rsize;            /* max read size */
	unsigned int rasize;           /* max readahead */
	unsigned int congestion_kb;    /* max writeback in flight */
	unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
	int caps_max;
	unsigned int max_readdir;       /* max readdir result (entries) */
	unsigned int max_readdir_bytes; /* max readdir result (bytes) */

	/*
	 * everything above this point can be memcmp'd; everything below
	 * is handled in compare_mount_options()
	 */

	char *snapdir_name;   /* default ".snap" */
	char *mds_namespace;  /* default NULL */
	char *server_path;    /* default NULL (means "/") */
	char *fscache_uniq;   /* default NULL */
};
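/*
 * Editorial sketch (hypothetical, not part of the original header): per
 * the comment inside the struct above, comparing two ceph_mount_options
 * could memcmp() the flat members and then check the strings, roughly:
 *
 *	size_t flat = offsetof(struct ceph_mount_options, snapdir_name);
 *
 *	if (memcmp(a, b, flat))
 *		return false;
 *	return null_safe_strcmp(a->snapdir_name, b->snapdir_name) == 0 &&
 *	       null_safe_strcmp(a->mds_namespace, b->mds_namespace) == 0;
 *
 * where null_safe_strcmp() is a hypothetical NULL-tolerant strcmp; the
 * actual comparison is done by compare_mount_options() in fs/ceph/super.c.
 */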

struct ceph_fs_client {
	struct super_block *sb;

	struct list_head metric_wakeup;

	struct ceph_mount_options *mount_options;
	struct ceph_client *client;

	unsigned long mount_state;

	unsigned long last_auto_reconnect;
	bool blocklisted;

	bool have_copy_from2;

	u32 filp_gen;
	loff_t max_file_size;

	struct ceph_mds_client *mdsc;

	atomic_long_t writeback_count;

	struct workqueue_struct *inode_wq;
	struct workqueue_struct *cap_wq;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_dentry_lru, *debugfs_caps;
	struct dentry *debugfs_congestion_kb;
	struct dentry *debugfs_bdi;
	struct dentry *debugfs_mdsc, *debugfs_mdsmap;
	struct dentry *debugfs_metric;
	struct dentry *debugfs_mds_sessions;
#endif

#ifdef CONFIG_CEPH_FSCACHE
	struct fscache_cookie *fscache;
#endif
};


/*
 * File i/o capability.  This tracks shared state with the metadata
 * server that allows us to cache or writeback attributes or to read
 * and write data.  For any given inode, we should have one or more
 * capabilities, one issued by each metadata server, and our
 * cumulative access is the OR of all issued capabilities.
 *
 * Each cap is referenced by the inode's i_caps rbtree and by per-mds
 * session capability lists.
 */
struct ceph_cap {
	struct ceph_inode_info *ci;
	struct rb_node ci_node;          /* per-ci cap tree */
	struct ceph_mds_session *session;
	struct list_head session_caps;   /* per-session caplist */
	u64 cap_id;       /* unique cap id (mds provided) */
	union {
		/* in-use caps */
		struct {
			int issued;       /* latest, from the mds */
			int implemented;  /* implemented superset of
					     issued (for revocation) */
			int mds;	  /* mds index for this cap */
			int mds_wanted;   /* caps wanted from this mds */
		};
		/* caps to release */
		struct {
			u64 cap_ino;
			int queue_release;
		};
	};
	u32 seq, issue_seq, mseq;
	u32 cap_gen;      /* active/stale cycle */
	unsigned long last_used;
	struct list_head caps_item;
};

#define CHECK_CAPS_AUTHONLY   1  /* only check auth cap */
#define CHECK_CAPS_FLUSH      2  /* flush any dirty caps */
#define CHECK_CAPS_NOINVAL    4  /* don't invalidate pagecache */

struct ceph_cap_flush {
	u64 tid;
	int caps;
	bool wake; /* wake up flush waiters when finished? */
	bool is_capsnap; /* true means capsnap */
	struct list_head g_list; // global
	struct list_head i_list; // per inode
};

/*
 * Snapped cap state that is pending flush to mds.  When a snapshot occurs,
 * we first complete any in-process sync writes and writeback any dirty
 * data before flushing the snapped state (tracked here) back to the MDS.
 */
struct ceph_cap_snap {
	refcount_t nref;
	struct list_head ci_item;

	struct ceph_cap_flush cap_flush;

	u64 follows;
	int issued, dirty;
	struct ceph_snap_context *context;

	umode_t mode;
	kuid_t uid;
	kgid_t gid;

	struct ceph_buffer *xattr_blob;
	u64 xattr_version;

	u64 size;
	u64 change_attr;
	struct timespec64 mtime, atime, ctime, btime;
	u64 time_warp_seq;
	u64 truncate_size;
	u32 truncate_seq;
	int writing;   /* a sync write is still in progress */
	int dirty_pages;     /* dirty pages awaiting writeback */
	bool inline_data;
	bool need_flush;
};

static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
	if (refcount_dec_and_test(&capsnap->nref)) {
		if (capsnap->xattr_blob)
			ceph_buffer_put(capsnap->xattr_blob);
		kfree(capsnap);
	}
}

/*
 * The frag tree describes how a directory is fragmented, potentially across
 * multiple metadata servers.  It is also used to indicate points where
 * metadata authority is delegated, and whether/where metadata is replicated.
 *
 * A _leaf_ frag will be present in the i_fragtree IFF there is
 * delegation info.  That is, if mds >= 0 || ndist > 0.
 */
#define CEPH_MAX_DIRFRAG_REP 4

struct ceph_inode_frag {
	struct rb_node node;

	/* fragtree state */
	u32 frag;
	int split_by;         /* i.e. 2^(split_by) children */

	/* delegation and replication info */
	int mds;              /* -1 if same authority as parent */
	int ndist;            /* >0 if replicated */
	int dist[CEPH_MAX_DIRFRAG_REP];
};

/*
 * We cache inode xattrs as an encoded blob until they are first used,
 * at which point we parse them into an rbtree.
 */
struct ceph_inode_xattr {
	struct rb_node node;

	const char *name;
	int name_len;
	const char *val;
	int val_len;
	int dirty;

	int should_free_name;
	int should_free_val;
};

/*
 * Ceph dentry state
 */
struct ceph_dentry_info {
	struct dentry *dentry;
	struct ceph_mds_session *lease_session;
	struct list_head lease_list;
	unsigned flags;
	int lease_shared_gen;
	u32 lease_gen;
	u32 lease_seq;
	unsigned long lease_renew_after, lease_renew_from;
	unsigned long time;
	u64 offset;
};

#define CEPH_DENTRY_REFERENCED		1
#define CEPH_DENTRY_LEASE_LIST		2
#define CEPH_DENTRY_SHRINK_LIST		4
#define CEPH_DENTRY_PRIMARY_LINK	8

struct ceph_inode_xattrs_info {
	/*
	 * (still encoded) xattr blob. we avoid the overhead of parsing
	 * this until someone actually calls getxattr, etc.
	 *
	 * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
	 * NULL means we don't know.
	 */
	struct ceph_buffer *blob, *prealloc_blob;

	struct rb_root index;
	bool dirty;
	int count;
	int names_size;
	int vals_size;
	u64 version, index_version;
};

/*
 * Ceph inode.
 */
struct ceph_inode_info {
	struct ceph_vino i_vino;   /* ceph ino + snap */

	spinlock_t i_ceph_lock;

	u64 i_version;
	u64 i_inline_version;
	u32 i_time_warp_seq;

	unsigned long i_ceph_flags;
	atomic64_t i_release_count;
	atomic64_t i_ordered_count;
	atomic64_t i_complete_seq[2];

	struct ceph_dir_layout i_dir_layout;
	struct ceph_file_layout i_layout;
	struct ceph_file_layout i_cached_layout;	// for async creates
	char *i_symlink;

	/* for dirs */
	struct timespec64 i_rctime;
	u64 i_rbytes, i_rfiles, i_rsubdirs;
	u64 i_files, i_subdirs;

	/* quotas */
	u64 i_max_bytes, i_max_files;

	s32 i_dir_pin;

	struct rb_root i_fragtree;
	int i_fragtree_nsplits;
	struct mutex i_fragtree_mutex;

	struct ceph_inode_xattrs_info i_xattrs;

	/* capabilities.  protected _both_ by i_ceph_lock and cap->session's
	 * s_mutex. */
	struct rb_root i_caps;           /* cap list */
	struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
	unsigned i_dirty_caps, i_flushing_caps;     /* mask of dirtied fields */

	/*
	 * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
	 * is protected by the mdsc->cap_dirty_lock, but each individual item
	 * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
	 * requires the mdsc->cap_dirty_lock. List presence for an item can
	 * be tested under the i_ceph_lock. Changing anything requires both.
	 */
	struct list_head i_dirty_item;

	/*
	 * Link to session's s_cap_flushing list. Protected in a similar
	 * fashion to i_dirty_item, but also by the s_mutex for changes. The
	 * s_cap_flushing list can be walked while holding either the s_mutex
	 * or mdsc->cap_dirty_lock. List presence can also be checked while
	 * holding the i_ceph_lock for this inode.
	 */
	struct list_head i_flushing_item;

	/* we need to track cap writeback on a per-cap-bit basis, to allow
	 * overlapping, pipelined cap flushes to the mds.  we can probably
	 * reduce the tid to 8 bits if we're concerned about inode size. */
	struct ceph_cap_flush *i_prealloc_cap_flush;
	struct list_head i_cap_flush_list;
	wait_queue_head_t i_cap_wq;      /* threads waiting on a capability */
	unsigned long i_hold_caps_max; /* jiffies */
	struct list_head i_cap_delay_list;  /* for delayed cap release to mds */
	struct ceph_cap_reservation i_cap_migration_resv;
	struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
						    dirty|flushing caps */
	unsigned i_snap_caps;           /* cap bits for snapped files */

	unsigned long i_last_rd;
	unsigned long i_last_wr;
	int i_nr_by_mode[CEPH_FILE_MODE_BITS];  /* open file counts */

	struct mutex i_truncate_mutex;
	u32 i_truncate_seq;        /* last truncate to smaller size */
	u64 i_truncate_size;       /*  and the size we last truncated down to */
	int i_truncate_pending;    /*  still need to call vmtruncate */

	u64 i_max_size;            /* max file size authorized by mds */
	u64 i_reported_size; /* (max_)size reported to or requested of mds */
	u64 i_wanted_max_size;     /* offset we'd like to write to */
	u64 i_requested_max_size;  /* max_size we've requested */

	/* held references to caps */
	int i_pin_ref;
	int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref, i_fx_ref;
	int i_wrbuffer_ref, i_wrbuffer_ref_head;
	atomic_t i_filelock_ref;
	atomic_t i_shared_gen;       /* increment each time we get FILE_SHARED */
	u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
	u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */

	struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */
	struct list_head i_unsafe_iops;   /* uncommitted mds inode ops */
	spinlock_t i_unsafe_lock;

	union {
		struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
		struct ceph_snapid_map *i_snapid_map; /* snapid -> dev_t */
	};
	int i_snap_realm_counter; /* snap realm (if caps) */
	struct list_head i_snap_realm_item;
	struct list_head i_snap_flush_item;
	struct timespec64 i_btime;
	struct timespec64 i_snap_btime;

	struct work_struct i_work;
	unsigned long  i_work_mask;

#ifdef CONFIG_CEPH_FSCACHE
	struct fscache_cookie *fscache;
	u32 i_fscache_gen;
#endif
	struct inode vfs_inode; /* at end */
};

static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
	return container_of(inode, struct ceph_inode_info, vfs_inode);
}

static inline struct ceph_fs_client *
ceph_inode_to_client(const struct inode *inode)
{
	return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}

static inline struct ceph_fs_client *
ceph_sb_to_client(const struct super_block *sb)
{
	return (struct ceph_fs_client *)sb->s_fs_info;
}

static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
	return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
}

static inline struct ceph_vino
ceph_vino(const struct inode *inode)
{
	return ceph_inode(inode)->i_vino;
}

static inline u32 ceph_ino_to_ino32(u64 vino)
{
	u32 ino = vino & 0xffffffff;
	ino ^= vino >> 32;
	if (!ino)
		ino = 2;
	return ino;
}
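/*
 * Editorial note (worked example, not in the original header): for
 * vino 0x0000000300000001 the low 32 bits are 0x00000001 and the high
 * 32 bits are 0x00000003, so the folded ino is 0x00000002.  If the XOR
 * comes out to 0 (e.g. for 0x0000000100000001) the result is forced to
 * 2, so the returned 32-bit ino is never zero.
 */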

/*
 * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on
 * some arches. We generally do not use this value inside the ceph driver, but
 * we do want to set it to something, so that generic vfs code has an
 * appropriate value for tracepoints and the like.
 */
static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino)
{
	if (sizeof(ino_t) == sizeof(u32))
		return ceph_ino_to_ino32(vino.ino);
	return (ino_t)vino.ino;
}

/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap

static inline u64 ceph_ino(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.ino;
}

static inline u64 ceph_snap(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.snap;
}

/**
 * ceph_present_ino - format an inode number for presentation to userland
 * @sb: superblock where the inode lives
 * @ino: inode number to (possibly) convert
 *
 * If the user mounted with the ino32 option, then the 64-bit value needs
 * to be converted to something that can fit inside 32 bits. Note that
 * internal kernel code never uses this value, so this is entirely for
 * userland consumption.
 */
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
	if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
		return ceph_ino_to_ino32(ino);
	return ino;
}

static inline u64 ceph_present_inode(struct inode *inode)
{
	return ceph_present_ino(inode->i_sb, ceph_ino(inode));
}

static inline int ceph_ino_compare(struct inode *inode, void *data)
{
	struct ceph_vino *pvino = (struct ceph_vino *)data;
	struct ceph_inode_info *ci = ceph_inode(inode);
	return ci->i_vino.ino == pvino->ino &&
		ci->i_vino.snap == pvino->snap;
}

/*
 * The MDS reserves a set of inodes for its own usage. These should never
 * be accessible by clients, and so the MDS has no reason to ever hand these
 * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
 *
 * These come from src/mds/mdstypes.h in the ceph sources.
 */
#define CEPH_MAX_MDS		0x100
#define CEPH_NUM_STRAY		10
#define CEPH_MDS_INO_MDSDIR_OFFSET	(1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE		((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
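/*
 * Editorial note (not in the original header): with CEPH_MAX_MDS = 0x100
 * and CEPH_NUM_STRAY = 10, CEPH_MDS_INO_MDSDIR_OFFSET evaluates to 0x100
 * and CEPH_INO_SYSTEM_BASE to 6*0x100 + 0x100*10 = 0x1000, so the helper
 * below treats inode numbers in [0x100, 0x1000) as MDS-reserved.
 */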

static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
	if (vino.ino < CEPH_INO_SYSTEM_BASE &&
	    vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
		WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
		return true;
	}
	return false;
}

static inline struct inode *ceph_find_inode(struct super_block *sb,
					    struct ceph_vino vino)
{
	if (ceph_vino_is_reserved(vino))
		return NULL;

	/*
	 * NB: The hashval will be run through the fs/inode.c hash function
	 * anyway, so there is no need to squash the inode number down to
	 * 32-bits first. Just use low-order bits on arches with 32-bit long.
	 */
	return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino);
}


/*
 * Ceph inode.
 */
#define CEPH_I_DIR_ORDERED	(1 << 0)  /* dentries in dir are ordered */
#define CEPH_I_FLUSH		(1 << 2)  /* do not delay flush of dirty metadata */
#define CEPH_I_POOL_PERM	(1 << 3)  /* pool rd/wr bits are valid */
#define CEPH_I_POOL_RD		(1 << 4)  /* can read from pool */
#define CEPH_I_POOL_WR		(1 << 5)  /* can write to pool */
#define CEPH_I_SEC_INITED	(1 << 6)  /* security initialized */
#define CEPH_I_KICK_FLUSH	(1 << 7)  /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS	(1 << 8)  /* need flush snaps */
#define CEPH_I_ERROR_WRITE	(1 << 9) /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK	(1 << 10) /* have seen file lock errors */
#define CEPH_I_ODIRECT		(1 << 11) /* inode in direct I/O mode */
#define CEPH_ASYNC_CREATE_BIT	(12)	  /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE	(1 << CEPH_ASYNC_CREATE_BIT)

/*
 * Masks of ceph inode work.
 */
#define CEPH_I_WORK_WRITEBACK		0 /* writeback */
#define CEPH_I_WORK_INVALIDATE_PAGES	1 /* invalidate pages */
#define CEPH_I_WORK_VMTRUNCATE		2 /* vmtruncate */

/*
 * We set the ERROR_WRITE bit when we start seeing write errors on an inode
 * and then clear it when they start succeeding. Note that we do a lockless
 * check first, and only take the lock if it looks like it needs to be changed.
 * The write submission code just takes this as a hint, so we're not too
 * worried if a few slip through in either direction.
 */
static inline void ceph_set_error_write(struct ceph_inode_info *ci)
{
	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE)) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags |= CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}

static inline void ceph_clear_error_write(struct ceph_inode_info *ci)
{
	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags &= ~CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}

static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
					   long long release_count,
					   long long ordered_count)
{
	/*
	 * Makes sure operations that setup readdir cache (update page
	 * cache and i_size) are strongly ordered w.r.t. the following
	 * atomic64_set() operations.
	 */
	smp_mb();
	atomic64_set(&ci->i_complete_seq[0], release_count);
	atomic64_set(&ci->i_complete_seq[1], ordered_count);
}

static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_release_count);
}

static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_ordered_count);
}

static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
{
	return atomic64_read(&ci->i_complete_seq[0]) ==
		atomic64_read(&ci->i_release_count);
}

static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
{
	return  atomic64_read(&ci->i_complete_seq[0]) ==
		atomic64_read(&ci->i_release_count) &&
		atomic64_read(&ci->i_complete_seq[1]) ==
		atomic64_read(&ci->i_ordered_count);
}

static inline void ceph_dir_clear_complete(struct inode *inode)
{
	__ceph_dir_clear_complete(ceph_inode(inode));
}

static inline void ceph_dir_clear_ordered(struct inode *inode)
{
	__ceph_dir_clear_ordered(ceph_inode(inode));
}

static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
{
	bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode));
	smp_rmb();
	return ret;
}

/* find a specific frag @f */
extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
						u32 f);

/*
 * choose fragment for value @v.  copy frag content to pfrag, if leaf
 * exists
 */
extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			    struct ceph_inode_frag *pfrag,
			    int *found);

static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
{
	return (struct ceph_dentry_info *)dentry->d_fsdata;
}

/*
 * caps helpers
 */
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
					  int t);
extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
				    struct ceph_cap *cap);

static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
	int issued;
	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	spin_unlock(&ci->i_ceph_lock);
	return issued;
}

static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
					       int mask, int touch)
{
	int r;
	spin_lock(&ci->i_ceph_lock);
	r = __ceph_caps_issued_mask_metric(ci, mask, touch);
	spin_unlock(&ci->i_ceph_lock);
	return r;
}

static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
	return ci->i_dirty_caps | ci->i_flushing_caps;
}
extern struct ceph_cap_flush *ceph_alloc_cap_flush(void);
extern void ceph_free_cap_flush(struct ceph_cap_flush *cf);
extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
				  struct ceph_cap_flush **pcf);

extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
				      struct ceph_cap *ocap, int mask);
extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);

static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
{
	return ci->i_nr_by_mode[0];
}
extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
extern int __ceph_caps_wanted(struct ceph_inode_info *ci);

/* what the mds thinks we want */
extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);

extern void ceph_caps_init(struct ceph_mds_client *mdsc);
extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
				     struct ceph_mount_options *fsopt);
extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
			     struct ceph_cap_reservation *ctx, int need);
extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			       struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_fs_client *client,
				    int *total, int *avail, int *used,
				    int *reserved, int *min);



/*
 * we keep buffered readdir results attached to file->private_data
 */
#define CEPH_F_SYNC     1
#define CEPH_F_ATEND    2

struct ceph_file_info {
	short fmode;     /* initialized on open */
	short flags;     /* CEPH_F_* */

	spinlock_t rw_contexts_lock;
	struct list_head rw_contexts;

	u32 filp_gen;
	atomic_t num_locks;
};

struct ceph_dir_file_info {
	struct ceph_file_info file_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	/* readdir: position within the dir */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	u32 frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct ceph_mds_request *last_readdir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	/* readdir: position within a frag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	unsigned next_offset;  /* offset of next chunk (last_name's + 1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	char *last_name;       /* last entry in previous chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	long long dir_release_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	long long dir_ordered_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	int readdir_cache_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	/* used for "-o dirstat" read() on a directory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	char *dir_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	int dir_info_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) struct ceph_rw_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	struct task_struct *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	int caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) #define CEPH_DEFINE_RW_CONTEXT(_name, _caps)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct ceph_rw_context _name = {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		.thread = current,		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		.caps = _caps,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) static inline void ceph_add_rw_context(struct ceph_file_info *cf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 				       struct ceph_rw_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	spin_lock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	list_add(&ctx->list, &cf->rw_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	spin_unlock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static inline void ceph_del_rw_context(struct ceph_file_info *cf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				       struct ceph_rw_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	spin_lock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	list_del(&ctx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	spin_unlock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static inline struct ceph_rw_context*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) ceph_find_rw_context(struct ceph_file_info *cf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct ceph_rw_context *ctx, *found = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	spin_lock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	list_for_each_entry(ctx, &cf->rw_contexts, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (ctx->thread == current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			found = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	spin_unlock(&cf->rw_contexts_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) }
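
/*
 * CEPH_DEFINE_RW_CONTEXT() builds an on-stack ceph_rw_context tagged with
 * the current task; a caller adds it to the file's rw_contexts list around
 * an I/O, and ceph_find_rw_context() lets code running later in the same
 * thread look that context up again.  A hedged userspace analog of the
 * pattern, with pthread_self() standing in for current and a plain doubly
 * linked list standing in for list_head (all demo_* names are invented):
 */
#if 0	/* illustrative sketch only; build separately, not part of this header */
#include <pthread.h>
#include <stdio.h>

struct demo_rw_context {
	struct demo_rw_context *prev, *next;
	pthread_t thread;		/* stands in for ->thread == current */
	int caps;
};

struct demo_file_info {
	pthread_mutex_t lock;		/* stands in for rw_contexts_lock */
	struct demo_rw_context *head;	/* stands in for the rw_contexts list */
};

static void demo_add_rw_context(struct demo_file_info *fi,
				struct demo_rw_context *ctx)
{
	pthread_mutex_lock(&fi->lock);
	ctx->prev = NULL;
	ctx->next = fi->head;
	if (fi->head)
		fi->head->prev = ctx;
	fi->head = ctx;
	pthread_mutex_unlock(&fi->lock);
}

static void demo_del_rw_context(struct demo_file_info *fi,
				struct demo_rw_context *ctx)
{
	pthread_mutex_lock(&fi->lock);
	if (ctx->prev)
		ctx->prev->next = ctx->next;
	else
		fi->head = ctx->next;
	if (ctx->next)
		ctx->next->prev = ctx->prev;
	pthread_mutex_unlock(&fi->lock);
}

/* find the context registered by the calling thread, if any */
static struct demo_rw_context *demo_find_rw_context(struct demo_file_info *fi)
{
	struct demo_rw_context *ctx, *found = NULL;

	pthread_mutex_lock(&fi->lock);
	for (ctx = fi->head; ctx; ctx = ctx->next) {
		if (pthread_equal(ctx->thread, pthread_self())) {
			found = ctx;
			break;
		}
	}
	pthread_mutex_unlock(&fi->lock);
	return found;
}

int main(void)
{
	struct demo_file_info fi = { PTHREAD_MUTEX_INITIALIZER, NULL };
	/* on-stack context, as CEPH_DEFINE_RW_CONTEXT() would build */
	struct demo_rw_context rw_ctx = { NULL, NULL, pthread_self(), 0x3 };

	demo_add_rw_context(&fi, &rw_ctx);
	printf("found caps: 0x%x\n", demo_find_rw_context(&fi)->caps);
	demo_del_rw_context(&fi, &rw_ctx);
	return 0;
}
#endif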
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) struct ceph_readdir_cache_control {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct page  *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct dentry **dentries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  * A "snap realm" describes a subset of the file hierarchy sharing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  * the same set of snapshots that apply to it.  The realms themselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * are organized into a hierarchy, such that children inherit (some of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * the snapshots of their parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * All inodes within the realm that have capabilities are linked into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * per-realm list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) struct ceph_snap_realm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	atomic_t nref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	u64 created, seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	u64 parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	u64 parent_since;   /* snapid when our current parent became so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	u64 *prior_parent_snaps;      /* snaps inherited from any parents we */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	u32 num_prior_parent_snaps;   /*  had prior to parent_since */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	u64 *snaps;                   /* snaps specific to this realm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	u32 num_snaps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct ceph_snap_realm *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct list_head children;       /* list of child realms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	struct list_head child_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	struct list_head empty_item;     /* if nref == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct list_head dirty_item;     /* if realm needs new context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* the current set of snaps for this realm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct ceph_snap_context *cached_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct list_head inodes_with_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	spinlock_t inodes_with_caps_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) };
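
/*
 * The comment above describes realms as a hierarchy in which children
 * inherit (some of) their parents' snapshots: snaps/num_snaps hold a
 * realm's own snapshots, while prior_parent_snaps carries what was
 * inherited before parent_since.  A deliberately simplified standalone
 * sketch of "effective snapshots = own snaps plus everything up the
 * parent chain"; it ignores parent_since filtering and the cached
 * ceph_snap_context, and the demo_* names are invented:
 */
#if 0	/* illustrative sketch only; build separately, not part of this header */
#include <stdio.h>

struct demo_realm {
	const struct demo_realm *parent;
	const unsigned long long *snaps;	/* snaps specific to this realm */
	unsigned num_snaps;
};

/* walk up the hierarchy, printing this realm's snaps and every ancestor's */
static void demo_print_effective_snaps(const struct demo_realm *realm)
{
	for (; realm; realm = realm->parent) {
		unsigned i;

		for (i = 0; i < realm->num_snaps; i++)
			printf(" %llu", realm->snaps[i]);
	}
	printf("\n");
}

int main(void)
{
	static const unsigned long long root_snaps[] = { 10, 20 };
	static const unsigned long long child_snaps[] = { 35 };
	struct demo_realm root = { NULL, root_snaps, 2 };
	struct demo_realm child = { &root, child_snaps, 1 };

	demo_print_effective_snaps(&child);	/* prints: 35 10 20 */
	return 0;
}
#endif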
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static inline int default_congestion_kb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	int congestion_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	 * Copied from NFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	 * congestion size, scale with available memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	 *  64MB:    8192k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	 * 128MB:   11585k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	 * 256MB:   16384k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	 * 512MB:   23170k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	 *   1GB:   32768k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 *   2GB:   46340k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	 *   4GB:   65536k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 *   8GB:   92681k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 *  16GB:  131072k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 * This allows larger machines to have larger/more transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 * Limit the default to 256M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (congestion_kb > 256*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		congestion_kb = 256*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	return congestion_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) }
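
/*
 * The table in the comment above follows directly from the formula: with
 * 4 KiB pages (PAGE_SHIFT == 12), 1 GiB of RAM is 262144 pages,
 * int_sqrt(262144) == 512, and (16 * 512) << 2 == 32768k, matching the
 * table.  A standalone sketch of the same arithmetic; the 4 KiB page size
 * is an assumption here, and demo_int_sqrt() is a simple integer square
 * root in place of the kernel's int_sqrt():
 */
#if 0	/* illustrative sketch only; build separately, not part of this header */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assume 4 KiB pages */

static unsigned long demo_int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static int demo_congestion_kb(unsigned long ram_mb)
{
	unsigned long pages = (ram_mb << 20) >> DEMO_PAGE_SHIFT;
	int kb = (16 * demo_int_sqrt(pages)) << (DEMO_PAGE_SHIFT - 10);

	if (kb > 256 * 1024)	/* cap the default at 256M */
		kb = 256 * 1024;
	return kb;
}

int main(void)
{
	printf("  64MB -> %dk\n", demo_congestion_kb(64));	/*  8192k */
	printf("   1GB -> %dk\n", demo_congestion_kb(1024));	/* 32768k */
	printf("  16GB -> %dk\n", demo_congestion_kb(16384));	/* 131072k */
	return 0;
}
#endif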
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) /* super.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) extern int ceph_force_reconnect(struct super_block *sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) /* snap.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 					       u64 ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				struct ceph_snap_realm *realm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 				struct ceph_snap_realm *realm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) extern int ceph_update_snap_trace(struct ceph_mds_client *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 				  void *p, void *e, bool deletion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				  struct ceph_snap_realm **realm_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			     struct ceph_mds_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			     struct ceph_msg *msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 				  struct ceph_cap_snap *capsnap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 						   u64 snap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) extern void ceph_put_snapid_map(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				struct ceph_snapid_map *sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  * a cap_snap is "pending" if it is still awaiting an in-progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * sync write (that may/may not still update size, mtime, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return !list_empty(&ci->i_cap_snaps) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	       list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			       ci_item)->writing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) /* inode.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) struct ceph_mds_reply_info_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) struct ceph_mds_reply_dirfrag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) extern const struct inode_operations ceph_file_iops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) extern struct inode *ceph_alloc_inode(struct super_block *sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) extern void ceph_evict_inode(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) extern void ceph_free_inode(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) extern struct inode *ceph_get_inode(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 				    struct ceph_vino vino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) extern struct inode *ceph_get_snapdir(struct inode *parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) extern int ceph_fill_file_size(struct inode *inode, int issued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			       u32 truncate_seq, u64 truncate_size, u64 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) extern void ceph_fill_file_time(struct inode *inode, int issued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 				u64 time_warp_seq, struct timespec64 *ctime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				struct timespec64 *mtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				struct timespec64 *atime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) extern int ceph_fill_inode(struct inode *inode, struct page *locked_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		    struct ceph_mds_reply_info_in *iinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		    struct ceph_mds_reply_dirfrag *dirinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		    struct ceph_mds_session *session, int cap_fmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		    struct ceph_cap_reservation *caps_reservation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) extern int ceph_fill_trace(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			   struct ceph_mds_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 				    struct ceph_mds_session *session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) extern int ceph_inode_holds_cap(struct inode *inode, int mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) extern void __ceph_do_pending_vmtruncate(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) extern void ceph_queue_vmtruncate(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) extern void ceph_queue_invalidate(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) extern void ceph_queue_writeback(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) extern void ceph_async_iput(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			     int mask, bool force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	return __ceph_do_getattr(inode, NULL, mask, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) extern int ceph_permission(struct inode *inode, int mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) extern int ceph_getattr(const struct path *path, struct kstat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			u32 request_mask, unsigned int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* xattr.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) extern const struct xattr_handler *ceph_xattr_handlers[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ceph_acl_sec_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #ifdef CONFIG_CEPH_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	void *default_acl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	void *acl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	void *sec_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	u32 sec_ctxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct ceph_pagelist *pagelist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #ifdef CONFIG_SECURITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) extern bool ceph_security_xattr_deadlock(struct inode *in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) extern bool ceph_security_xattr_wanted(struct inode *in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static inline bool ceph_security_xattr_deadlock(struct inode *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static inline bool ceph_security_xattr_wanted(struct inode *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 				     struct ceph_acl_sec_ctx *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static inline void ceph_security_invalidate_secctx(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	security_inode_invalidate_secctx(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 					    struct ceph_acl_sec_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static inline void ceph_security_invalidate_secctx(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
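
/*
 * This header repeatedly uses the same compile-time pattern: when a CONFIG
 * option is enabled, a real implementation is declared (or defined inline);
 * otherwise a static inline no-op or constant stub keeps callers building
 * unchanged.  A minimal standalone illustration of that idiom; the
 * DEMO_HAVE_FEATURE macro and demo_* names are invented here:
 */
#if 0	/* illustrative sketch only; build separately, not part of this header */
#include <stdio.h>

/* flip this to 1 to emulate building with the feature enabled */
#define DEMO_HAVE_FEATURE 0

#if DEMO_HAVE_FEATURE
int demo_feature_init(const char *name);	/* real implementation elsewhere */
#else
/* stub: callers compile and run the same, the feature just does nothing */
static inline int demo_feature_init(const char *name)
{
	(void)name;
	return 0;
}
#endif

int main(void)
{
	/* the call site does not care whether the feature is compiled in */
	printf("init -> %d\n", demo_feature_init("example"));
	return 0;
}
#endif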
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* acl.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #ifdef CONFIG_CEPH_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct posix_acl *ceph_get_acl(struct inode *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		       struct ceph_acl_sec_ctx *as_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) void ceph_init_inode_acls(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			  struct ceph_acl_sec_ctx *as_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static inline void ceph_forget_all_cached_acls(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	forget_all_cached_acls(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #define ceph_get_acl NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #define ceph_set_acl NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static inline int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 				     struct ceph_acl_sec_ctx *as_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static inline void ceph_init_inode_acls(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 					struct ceph_acl_sec_ctx *as_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static inline void ceph_forget_all_cached_acls(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* caps.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) extern const char *ceph_cap_string(int c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) extern void ceph_handle_caps(struct ceph_mds_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			     struct ceph_msg *msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) extern struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				     struct ceph_cap_reservation *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) extern void ceph_add_cap(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			 struct ceph_mds_session *session, u64 cap_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			 unsigned issued, unsigned wanted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			 unsigned cap, unsigned seq, u64 realmino, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			 struct ceph_cap **new_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) extern void __ceph_remove_caps(struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) extern void ceph_put_cap(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			 struct ceph_cap *cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) extern int ceph_is_any_caps(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		      int datasync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 					  struct ceph_mds_session *session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 				    struct ceph_mds_session *session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 				   struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 					     int mds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				bool snap_rwsem_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 					    int had);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				       struct ceph_snap_context *snapc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) extern void __ceph_remove_capsnap(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				  struct ceph_cap_snap *capsnap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				  bool *wake_ci, bool *wake_mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) extern void ceph_remove_capsnap(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				struct ceph_cap_snap *capsnap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 				bool *wake_ci, bool *wake_mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) extern void ceph_flush_snaps(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			     struct ceph_mds_session **psession);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			    struct ceph_mds_session *session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) extern int  ceph_drop_caps_for_unlink(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) extern int ceph_encode_inode_release(void **p, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				     int mds, int drop, int unless, int force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 				      struct inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				      int mds, int drop, int unless);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) extern int ceph_get_caps(struct file *filp, int need, int want,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			 loff_t endoff, int *got, struct page **pinned_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) extern int ceph_try_get_caps(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			     int need, int want, bool nonblock, int *got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* for counting open files by mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) extern void ceph_get_fmode(struct ceph_inode_info *ci, int mode, int count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode, int count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			       struct ceph_mds_client *mdsc, int fmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* addr.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) extern const struct address_space_operations ceph_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) extern int ceph_pool_perm_check(struct inode *inode, int need);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) extern void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* file.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) extern const struct file_operations ceph_file_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) extern int ceph_renew_caps(struct inode *inode, int fmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) extern int ceph_open(struct inode *inode, struct file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			    struct file *file, unsigned flags, umode_t mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) extern int ceph_release(struct inode *inode, struct file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				  char *data, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* dir.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) extern const struct file_operations ceph_dir_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) extern const struct file_operations ceph_snapdir_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) extern const struct inode_operations ceph_dir_iops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) extern const struct inode_operations ceph_snapdir_iops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) extern const struct dentry_operations ceph_dentry_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) extern int ceph_handle_snapdir(struct ceph_mds_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			       struct dentry *dentry, int err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 					 struct dentry *dentry, int err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* ioctl.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* export.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) extern const struct export_operations ceph_export_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /* locks.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) extern __init void ceph_flock_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) extern int ceph_encode_locks_to_buffer(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 				       struct ceph_filelock *flocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				       int num_fcntl_locks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 				       int num_flock_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 				  struct ceph_pagelist *pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 				  int num_fcntl_locks, int num_flock_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* debugfs.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* quota.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	return ci->i_max_files || ci->i_max_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static inline void __ceph_update_quota(struct ceph_inode_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 				       u64 max_bytes, u64 max_files)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	bool had_quota, has_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	had_quota = __ceph_has_any_quota(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	ci->i_max_bytes = max_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	ci->i_max_files = max_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	has_quota = __ceph_has_any_quota(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (had_quota != has_quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
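
/*
 * __ceph_update_quota() only touches the per-realm quota count when the
 * inode transitions between "has some quota" and "has none": setting both
 * limits to zero drops it from the count, and setting either limit from
 * zero raises it.  A small standalone sketch of that edge-triggered update
 * (the demo_* names are invented here):
 */
#if 0	/* illustrative sketch only; build separately, not part of this header */
#include <stdbool.h>
#include <stdio.h>

struct demo_quota_inode {
	unsigned long long max_bytes;
	unsigned long long max_files;
};

static int demo_quota_realms;	/* stands in for the client-wide realm count */

static bool demo_has_any_quota(const struct demo_quota_inode *qi)
{
	return qi->max_files || qi->max_bytes;
}

static void demo_update_quota(struct demo_quota_inode *qi,
			      unsigned long long max_bytes,
			      unsigned long long max_files)
{
	bool had_quota = demo_has_any_quota(qi);
	bool has_quota;

	qi->max_bytes = max_bytes;
	qi->max_files = max_files;
	has_quota = demo_has_any_quota(qi);

	/* only adjust the shared count on a 0 <-> non-0 transition */
	if (had_quota != has_quota)
		demo_quota_realms += has_quota ? 1 : -1;
}

int main(void)
{
	struct demo_quota_inode qi = { 0, 0 };

	demo_update_quota(&qi, 1 << 20, 0);	/* gains a quota: count 0 -> 1 */
	demo_update_quota(&qi, 2 << 20, 0);	/* still has one: count stays 1 */
	demo_update_quota(&qi, 0, 0);		/* loses it: count 1 -> 0 */
	printf("realms with quota: %d\n", demo_quota_realms);
	return 0;
}
#endif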
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			      struct ceph_mds_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			      struct ceph_msg *msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 					     loff_t newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 						loff_t newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				     struct kstatfs *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) extern int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 				   struct inode *old, struct inode *new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) #endif /* _FS_CEPH_SUPER_H */