Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* -*- mode: c; c-basic-offset: 8; -*-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * vim: noexpandtab sw=8 ts=8 sts=0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * uptodate.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Tracking the up-to-date-ness of a local buffer_head with respect to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Copyright (C) 2002, 2004, 2005 Oracle.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Standard buffer head caching flags (uptodate, etc) are insufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * in a clustered environment - a buffer may be marked up to date on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * our local node but could have been modified by another cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * member. As a result an additional (and performant) caching scheme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * is required. A further requirement is that we consume as little
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * memory as possible - we never pin buffer_head structures in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * to cache them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * We track the existence of up to date buffers on the inodes which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * are associated with them. Because we don't want to pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * buffer_heads, this is only a (strong) hint and several other checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * are made in the I/O path to ensure that we don't use a stale or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * invalid buffer without going to disk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *	- buffer_jbd is used liberally - if a bh is in the journal on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *	  this node then it *must* be up to date.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  *	- the standard buffer_uptodate() macro is used to detect buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  *	  which may be invalid (even if we have an up to date tracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * 	  item for them)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * For a full understanding of how this code works together, one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * should read the callers in dlmglue.c, the I/O functions in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * buffer_head_io.c and ocfs2_journal_access in journal.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include <cluster/masklog.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include "ocfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include "uptodate.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include "ocfs2_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
/*
 * One tracked metadata block, used once the cache has outgrown the
 * inline array and is kept as an rb-tree keyed by c_block (see
 * __ocfs2_insert_cache_tree() / ocfs2_search_cache_tree() below).
 */
struct ocfs2_meta_cache_item {
	struct rb_node	c_node;		/* linkage in ci_cache.ci_tree */
	sector_t	c_block;	/* disk block number being tracked */
};

/* Slab cache for ocfs2_meta_cache_item allocations. */
static struct kmem_cache *ocfs2_uptodate_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	return ci->ci_ops->co_owner(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	return ci->ci_ops->co_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	ci->ci_ops->co_cache_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	ci->ci_ops->co_cache_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	ci->ci_ops->co_io_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	BUG_ON(!ci || !ci->ci_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	ci->ci_ops->co_io_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 				       int clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	ci->ci_num_cached = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	if (clear) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		ci->ci_created_trans = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		ci->ci_last_trans = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 			       const struct ocfs2_caching_operations *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	BUG_ON(!ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	ci->ci_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	ocfs2_metadata_cache_reset(ci, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
/* Tear down a cache: drop every tracked block, then reset all state
 * (including transaction ids). */
void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
{
	ocfs2_metadata_cache_purge(ci);
	ocfs2_metadata_cache_reset(ci, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) /* No lock taken here as 'root' is not expected to be visible to other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  * processes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	unsigned int purged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	struct ocfs2_meta_cache_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	while ((node = rb_last(root)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 		item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		trace_ocfs2_purge_copied_metadata_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 					(unsigned long long) item->c_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		rb_erase(&item->c_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		kmem_cache_free(ocfs2_uptodate_cachep, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		purged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	return purged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
/* Called from locking and called from ocfs2_clear_inode. Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
	unsigned int tree, to_purge, purged;
	struct rb_root root = RB_ROOT;

	BUG_ON(!ci || !ci->ci_ops);

	ocfs2_metadata_cache_lock(ci);
	/* tree != 0 means the cache outgrew the inline array and is
	 * currently stored in the rb-tree member of the ci_cache union. */
	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
	to_purge = ci->ci_num_cached;

	trace_ocfs2_metadata_cache_purge(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		to_purge, tree);

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache. We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	/* Reset under the lock (keeps transaction ids intact), then
	 * free the detached tree items outside the lock. */
	ocfs2_metadata_cache_reset(ci, 0);
	ocfs2_metadata_cache_unlock(ci);

	purged = ocfs2_purge_copied_metadata_tree(&root);
	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors. Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
		     (unsigned long long)ocfs2_metadata_cache_owner(ci),
		     to_purge, purged);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) /* Returns the index in the cache array, -1 if not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)  * Requires ip_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 				    sector_t item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	for (i = 0; i < ci->ci_num_cached; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		if (item == ci->ci_cache.ci_array[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /* Returns the cache item if found, otherwise NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  * Requires ip_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static struct ocfs2_meta_cache_item *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 			sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	struct rb_node * n = ci->ci_cache.ci_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	struct ocfs2_meta_cache_item *item = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		if (block < item->c_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		else if (block > item->c_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 			n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 			return item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 			       struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	int index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	struct ocfs2_meta_cache_item *item = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	ocfs2_metadata_cache_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	trace_ocfs2_buffer_cached_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 		(unsigned long long) bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		!!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		index = ocfs2_search_cache_array(ci, bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		item = ocfs2_search_cache_tree(ci, bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	ocfs2_metadata_cache_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	trace_ocfs2_buffer_cached_end(index, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	return (index != -1) || (item != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
			  struct buffer_head *bh)
{
	/* Without the local uptodate bit the data cannot be valid,
	 * whatever our cache says. */
	if (!buffer_uptodate(bh))
		return 0;

	/* A buffer journaled on this node must be current -- OCFS2
	 * does not allow multiple nodes to change the same block at
	 * the same time. Failing that, trust the local uptodate bit
	 * only when our cache also tracks the block. */
	return buffer_jbd(bh) || ocfs2_buffer_cached(ci, bh);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * ci_io_sem should be held to serialize submitters with the logic here.
 */
int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
			    struct buffer_head *bh)
{
	if (!buffer_locked(bh))
		return 0;

	return ocfs2_buffer_cached(ci, bh);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /* Requires ip_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 				     sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	trace_ocfs2_append_cache_array(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 		(unsigned long long)block, ci->ci_num_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	ci->ci_num_cached++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
				      struct ocfs2_meta_cache_item *new)
{
	sector_t block = new->c_block;
	struct rb_node *parent = NULL;
	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *tmp;

	trace_ocfs2_insert_cache_tree(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, ci->ci_num_cached);

	/* Standard rb-tree descent: walk down to the leaf link where
	 * the new block number belongs, remembering the parent so the
	 * node can be linked there. */
	while(*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

		if (block < tmp->c_block)
			p = &(*p)->rb_left;
		else if (block > tmp->c_block)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate block %llu cached!\n",
			     (unsigned long long) block);
			BUG();
		}
	}

	/* Link at the found leaf position, then rebalance/recolor. */
	rb_link_node(&new->c_node, parent, p);
	rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached++;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /* co_cache_lock() must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
 * pointers in tree after we use them - this allows caller to detect
 * when to free in case of error.
 *
 * The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
			       struct ocfs2_meta_cache_item **tree)
{
	int i;

	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
			"Owner %llu, num cached = %u, should be %u\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Owner %llu not marked as inline anymore!\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci));

	/* Be careful to initialize the tree members *first* because
	 * once the ci_tree is used, the array is junk... (ci_array
	 * and ci_tree share the ci_cache union). */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
		tree[i]->c_block = ci->ci_cache.ci_array[i];

	/* Switch representations: from here on ci_cache is the tree. */
	ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
	ci->ci_cache.ci_tree = RB_ROOT;
	/* this will be set again by __ocfs2_insert_cache_tree */
	ci->ci_num_cached = 0;

	/* Re-insert every former array entry, handing ownership of
	 * each item to the tree (NULLed so the caller won't free it). */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
		__ocfs2_insert_cache_tree(ci, tree[i]);
		tree[i] = NULL;
	}

	trace_ocfs2_expand_cache(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		ci->ci_flags, ci->ci_num_cached);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
/* Slow path function - memory allocation is necessary. See the
 * comment above ocfs2_set_buffer_uptodate for more information.
 *
 * Allocates outside the cache lock, then re-checks the cache state
 * after taking it, since items may have been removed in between.
 * On allocation failure the block is simply not cached (best effort). */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
					sector_t block,
					int expand_tree)
{
	int i;
	struct ocfs2_meta_cache_item *new = NULL;
	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
		{ NULL, };

	trace_ocfs2_set_buffer_uptodate(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, expand_tree);

	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
	if (!new) {
		mlog_errno(-ENOMEM);
		return;
	}
	new->c_block = block;

	if (expand_tree) {
		/* Do *not* allocate an array here - the removal code
		 * has no way of tracking that. */
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
						   GFP_NOFS);
			if (!tree[i]) {
				mlog_errno(-ENOMEM);
				goto out_free;
			}

			/* These are initialized in ocfs2_expand_cache! */
		}
	}

	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(ci)) {
		/* Ok, items were removed from the cache in between
		 * locks. Detect this and revert back to the fast path */
		ocfs2_append_cache_array(ci, block);
		ocfs2_metadata_cache_unlock(ci);
		goto out_free;
	}

	if (expand_tree)
		ocfs2_expand_cache(ci, tree);

	__ocfs2_insert_cache_tree(ci, new);
	ocfs2_metadata_cache_unlock(ci);

	/* 'new' now belongs to the tree; don't free it below. */
	new = NULL;
out_free:
	if (new)
		kmem_cache_free(ocfs2_uptodate_cachep, new);

	/* If these were used, then ocfs2_expand_cache re-set them to
	 * NULL for us. */
	if (tree[0]) {
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
			if (tree[i])
				kmem_cache_free(ocfs2_uptodate_cachep,
						tree[i]);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /* Item insertion is guarded by co_io_lock(), so the insertion path takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)  * advantage of this by not rechecking for a duplicate insert during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)  * the slow case. Additionally, if the cache needs to be bumped up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)  * a tree, the code will not recheck after acquiring the lock --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)  * multiple paths cannot be expanding to a tree at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)  * The slow path takes into account that items can be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)  * (including the whole tree wiped and reset) when this process it out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  * allocating memory. In those cases, it reverts back to the fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  * path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)  * Note that this function may actually fail to insert the block if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)  * memory cannot be allocated. This is not fatal however (but may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)  * result in a performance penalty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  * Readahead buffers can be passed in here before the I/O request is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 			       struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	int expand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	/* The block may very well exist in our cache already, so avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	 * doing any more work in that case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	if (ocfs2_buffer_cached(ci, bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	trace_ocfs2_set_buffer_uptodate_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		(unsigned long long)bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	/* No need to recheck under spinlock - insertion is guarded by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	 * co_io_lock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	ocfs2_metadata_cache_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	if (ocfs2_insert_can_use_array(ci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 		/* Fast case - it's an array and there's a free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		 * spot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		ocfs2_append_cache_array(ci, bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		ocfs2_metadata_cache_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	expand = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		/* We need to bump things up to a tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		expand = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	ocfs2_metadata_cache_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	__ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) /* Called against a newly allocated buffer. Most likely nobody should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)  * be able to read this sort of metadata while it's still being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)  * allocated, but this is careful to take co_io_lock() anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 				   struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	/* This should definitely *not* exist in our cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	BUG_ON(ocfs2_buffer_cached(ci, bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	ocfs2_metadata_cache_io_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	ocfs2_set_buffer_uptodate(ci, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	ocfs2_metadata_cache_io_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /* Requires ip_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 					int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	sector_t *array = ci->ci_cache.ci_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	int bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	BUG_ON(index >= ci->ci_num_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	BUG_ON(!ci->ci_num_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	trace_ocfs2_remove_metadata_array(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		index, ci->ci_num_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	ci->ci_num_cached--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	/* don't need to copy if the array is now empty, or if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	 * removed at the tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	if (ci->ci_num_cached && index < ci->ci_num_cached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		memmove(&array[index], &array[index + 1], bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /* Requires ip_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 				       struct ocfs2_meta_cache_item *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	trace_ocfs2_remove_metadata_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		(unsigned long long)item->c_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	ci->ci_num_cached--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 					  sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	struct ocfs2_meta_cache_item *item = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	ocfs2_metadata_cache_lock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	trace_ocfs2_remove_block_from_cache(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		(unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		(unsigned long long) block, ci->ci_num_cached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		ci->ci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		index = ocfs2_search_cache_array(ci, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		if (index != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			ocfs2_remove_metadata_array(ci, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		item = ocfs2_search_cache_tree(ci, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		if (item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 			ocfs2_remove_metadata_tree(ci, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	ocfs2_metadata_cache_unlock(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	if (item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		kmem_cache_free(ocfs2_uptodate_cachep, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)  * Called when we remove a chunk of metadata from an inode. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)  * bother reverting things to an inlined array in the case of a remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)  * which moves us back under the limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 			     struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	sector_t block = bh->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	ocfs2_remove_block_from_cache(ci, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) /* Called when we remove xattr clusters from an inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 					    sector_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 					    u32 c_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	for (i = 0; i < b_len; i++, block++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		ocfs2_remove_block_from_cache(ci, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int __init init_ocfs2_uptodate_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 				  sizeof(struct ocfs2_meta_cache_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 				  0, SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	if (!ocfs2_uptodate_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) void exit_ocfs2_uptodate_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	kmem_cache_destroy(ocfs2_uptodate_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }