Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
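
/*
 * Illustrative note (not in the original source): these thresholds drive the
 * throttling in this file.  finish_one_item() below wakes waiters once the
 * global delayed-item count drops under BTRFS_DELAYED_BACKGROUND, or on every
 * BTRFS_DELAYED_BATCH-th completed item; BTRFS_DELAYED_WRITEBACK is assumed
 * to be the higher watermark used by the flush logic later in this file.
 */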

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
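
/*
 * Worked example (illustrative, not part of the original source): two delayed
 * dir index items for the same directory with adjacent key offsets, e.g.
 * (257 DIR_INDEX 100) and (257 DIR_INDEX 101), are "continuous" and may be
 * batched into one leaf insertion below; (257 DIR_INDEX 100) and
 * (257 DIR_INDEX 102) are not.
 */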

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
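
/*
 * Usage sketch (illustrative, not part of the original source): a reference
 * obtained here is dropped with btrfs_release_delayed_node() defined below:
 *
 *	struct btrfs_delayed_node *dn = btrfs_get_delayed_node(inode);
 *
 *	if (dn) {
 *		mutex_lock(&dn->mutex);
 *		... inspect dn->ins_root / dn->del_root ...
 *		mutex_unlock(&dn->mutex);
 *		btrfs_release_delayed_node(dn);
 *	}
 */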

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
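
/*
 * Note (added commentary): the -EEXIST branch above covers the race where
 * another task inserted a delayed node for the same inode between our failed
 * lookup and radix_tree_insert().  We free our copy and retry the lookup,
 * which will then take references on the winner's node instead.
 */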

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
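
/*
 * Note (added commentary): a delayed node can sit on two lists at once.
 * n_list (node_list) tracks every node with pending work and holds a
 * reference while the node is listed; p_list (prepare_list) is the subset
 * already picked for flushing.  The mod flag above only matters for nodes
 * that are already on n_list.
 */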

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
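
/*
 * Iteration sketch (illustrative, not part of the original source): a flush
 * path can walk the node list with the pair above while keeping refcounts
 * balanced:
 *
 *	node = btrfs_first_delayed_node(delayed_root);
 *	while (node) {
 *		next = btrfs_next_delayed_node(node);
 *		... flush node ...
 *		btrfs_release_delayed_node(node);
 *		node = next;
 *	}
 */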

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}
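
/*
 * Note (added commentary): the lifetime rules as implemented above: one
 * reference is held by the inode's cached pointer, one by list membership,
 * and one per in-flight user.  Whoever drops the final reference also
 * removes the node from the radix tree, which is why a concurrent lookup
 * that observes a zero refcount (see btrfs_get_delayed_node()) must treat
 * the node as already gone.
 */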

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
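
/*
 * Layout note (added commentary, assuming the definition in
 * delayed-inode.h): struct btrfs_delayed_item ends in a flexible data area,
 * so the single kmalloc() above allocates the header and the item payload
 * (e.g. a directory name entry) back to back; item->data and data_len are
 * what later gets copied into the leaf.
 */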

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rb-tree to search (ins_root or del_root of a delayed node)
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
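
/*
 * Worked example (illustrative, not part of the original source): with items
 * at key offsets 10 and 20 in the tree, looking up offset 15 returns NULL
 * and sets *prev to the offset-10 item and *next to the offset-20 item;
 * looking up offset 5 sets *prev to NULL and *next to the offset-10 item.
 */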

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
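
/*
 * Note (added commentary): both trees are rb_root_cached, so the leftmost
 * flag computed above maintains an O(1) pointer to the smallest key; this is
 * what lets __btrfs_first_delayed_insertion_item()/_deletion_item() below use
 * rb_first_cached() cheaply.  For dir index insertions, index_cnt is also
 * pushed past the new offset so the next allocated index stays unique.
 */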

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if (atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)
		cond_wake_up_nomb(&delayed_root->wait);
}
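
/*
 * Worked example (illustrative, not part of the original source): with
 * BTRFS_DELAYED_BACKGROUND = 128 and BTRFS_DELAYED_BATCH = 16, finishing an
 * item wakes sleepers either when the backlog has just dropped below 128
 * items, or on every 16th completion (seq 16, 32, 48, ...), so tasks
 * throttled on a large backlog make steady progress instead of waiting for
 * the queue to drain completely.
 */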

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction.  So there is
	 * no need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}
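
/*
 * Note (added commentary): the reservation made in
 * btrfs_delayed_item_reserve_metadata() is held in item->bytes_reserved
 * until the item actually reaches the b-tree; both the batch path and the
 * single-item path below call btrfs_delayed_item_release_metadata() once the
 * payload has been written into a leaf.
 */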

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space.  This is a problem because
	 * we still need to reserve space for this update, so try to reserve
	 * the space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}
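
/*
 * Note (added commentary): there are two paths above.  With no usable
 * transaction reservation we take a fresh one, pairing a qgroup prealloc
 * reserve with either a successful block rsv add or a matching qgroup free
 * on failure (-EAGAIN is rewritten to -ENOSPC for btrfs_dirty_inode).
 * Otherwise we simply migrate already-reserved bytes from the transaction
 * rsv, which needs no extra qgroup accounting.
 */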

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper inserts a batch of continuous items into the same leaf, taking
 * as many as the leaf's free space allows.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * Count the number of continuous items that we can insert in one
	 * batch.
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
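/*
 * A sketch of what "continuous" means in the loop above, mirroring what
 * btrfs_is_continuous_delayed_item() is expected to check (the helper below
 * is hypothetical, not part of this file): two dir index keys are continuous
 * when they differ only by one in the offset, e.g. (256 DIR_INDEX 10) and
 * (256 DIR_INDEX 11), so both items can be set up by a single
 * setup_items_for_insert() call.
 *
 *	static bool keys_are_continuous(const struct btrfs_key *a,
 *					const struct btrfs_key *b)
 *	{
 *		return a->objectid == b->objectid && a->type == b->type &&
 *		       a->offset + 1 == b->offset;
 *	}
 */
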
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  * This helper handles only simple insertions that don't need to extend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * an item with new data, such as directory name index and inode insertion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 				     struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 				     struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				     struct btrfs_delayed_item *delayed_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	unsigned int nofs_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	nofs_flag = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 				      delayed_item->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	memalloc_nofs_restore(nofs_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (ret < 0 && ret != -EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			    delayed_item->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	btrfs_mark_buffer_dirty(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	btrfs_delayed_item_release_metadata(root, delayed_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  * We insert one item first, then if there are continuous items following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * it, we try to insert those items into the same leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				      struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				      struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				      struct btrfs_delayed_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct btrfs_delayed_item *curr, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) do_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	mutex_lock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	curr = __btrfs_first_delayed_insertion_item(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (!curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		goto insert_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		goto insert_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	prev = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	curr = __btrfs_next_delayed_item(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		/* insert the continuous items into the same leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		btrfs_batch_insert_items(root, path, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	btrfs_release_delayed_item(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	btrfs_mark_buffer_dirty(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	goto do_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) insert_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				    struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				    struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				    struct btrfs_delayed_item *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct btrfs_delayed_item *curr, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct list_head head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	int nitems, i, last_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	BUG_ON(!path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	i = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	last_item = btrfs_header_nritems(leaf) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (i > last_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		return -ENOENT;	/* FIXME: Is errno suitable? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	next = item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	INIT_LIST_HEAD(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	btrfs_item_key_to_cpu(leaf, &key, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	nitems = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	 * Count the number of dir index items that we can delete in one batch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		list_add_tail(&next->tree_list, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		nitems++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		curr = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		next = __btrfs_next_delayed_item(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		if (!btrfs_is_continuous_delayed_item(curr, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (i > last_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		btrfs_item_key_to_cpu(leaf, &key, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (!nitems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	list_for_each_entry_safe(curr, next, &head, tree_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		btrfs_delayed_item_release_metadata(root, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		list_del(&curr->tree_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		btrfs_release_delayed_item(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
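/*
 * Worked example for btrfs_batch_delete_items() above: if the leaf holds
 * (256 DIR_INDEX 10) (256 DIR_INDEX 11) (256 DIR_INDEX 13) and the delayed
 * deletion tree holds items for offsets 10 and 11, the loop pairs the first
 * two leaf slots with the delayed items and a single btrfs_del_items() call
 * removes both; offset 13 is not continuous with 11, so it would end the
 * run in any case.
 */
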
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				      struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				      struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				      struct btrfs_delayed_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct btrfs_delayed_item *curr, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	unsigned int nofs_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) do_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	mutex_lock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	curr = __btrfs_first_delayed_deletion_item(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (!curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		goto delete_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	nofs_flag = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	memalloc_nofs_restore(nofs_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		goto delete_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		 * We can't find the item this node points to, so the node is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		 * invalid; just drop it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		prev = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		curr = __btrfs_next_delayed_item(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		btrfs_release_delayed_item(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		if (curr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			goto do_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			goto delete_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	btrfs_batch_delete_items(trans, root, path, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	goto do_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) delete_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct btrfs_delayed_root *delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (delayed_node &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		BUG_ON(!delayed_node->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		delayed_node->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		delayed_root = delayed_node->root->fs_info->delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		finish_one_item(delayed_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct btrfs_delayed_root *delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	ASSERT(delayed_node->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	delayed_node->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	delayed_root = delayed_node->root->fs_info->delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	finish_one_item(delayed_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 					struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 					struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 					struct btrfs_delayed_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct btrfs_inode_item *inode_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	unsigned int nofs_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	int mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	key.objectid = node->inode_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	key.type = BTRFS_INODE_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		mod = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		mod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	nofs_flag = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	memalloc_nofs_restore(nofs_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				    struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			    sizeof(struct btrfs_inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	btrfs_mark_buffer_dirty(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		goto no_iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
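	/*
	 * An INODE_REF/INODE_EXTREF item for this inode sorts immediately
	 * after its INODE_ITEM (same objectid, higher key type), so try the
	 * very next slot first; the "search" label below only handles the
	 * case where the ref spilled over into the next leaf.
	 */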
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (path->slots[0] >= btrfs_header_nritems(leaf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		goto search;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (key.objectid != node->inode_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	    key.type != BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 * Delayed iref deletion is only used for an inode that has a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	 * link, so there is only one iref. The case of several irefs stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	 * in the same item doesn't exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	btrfs_del_item(trans, root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	btrfs_release_delayed_iref(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) no_iref:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	btrfs_release_delayed_inode(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 * If we fail to update the delayed inode we need to abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 * transaction, because we could leave the inode with the improper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	 * counts behind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (ret && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) search:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	key.type = BTRFS_INODE_EXTREF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	key.offset = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	nofs_flag = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	memalloc_nofs_restore(nofs_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	ASSERT(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
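/*
 * The node->inode_item copied out above is filled in when the in-memory
 * inode is dirtied (see btrfs_delayed_update_inode() further down in this
 * file), so by the time we get here it is a complete btrfs_inode_item and
 * a single write_extent_buffer() call suffices.
 */
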
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 					     struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 					     struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 					     struct btrfs_delayed_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	mutex_lock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				   struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				   struct btrfs_delayed_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * Called when committing the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * Returns < 0 on error; in that case the transaction is aborted and any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * outstanding delayed items are cleaned up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	struct btrfs_delayed_root *delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	struct btrfs_delayed_node *curr_node, *prev_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	struct btrfs_block_rsv *block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	bool count = (nr > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (TRANS_ABORTED(trans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	block_rsv = trans->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	trans->block_rsv = &fs_info->delayed_block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	delayed_root = fs_info->delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	curr_node = btrfs_first_delayed_node(delayed_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	while (curr_node && (!count || (count && nr--))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		ret = __btrfs_commit_inode_delayed_items(trans, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 							 curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			btrfs_release_delayed_node(curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			curr_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		prev_node = curr_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		curr_node = btrfs_next_delayed_node(curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		btrfs_release_delayed_node(prev_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (curr_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		btrfs_release_delayed_node(curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	trans->block_rsv = block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	return __btrfs_run_delayed_items(trans, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	return __btrfs_run_delayed_items(trans, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
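/*
 * Usage sketch (hypothetical callers, not from this file): the transaction
 * commit path flushes everything, while throttled contexts flush only a
 * bounded batch so they do not stall behind an unbounded backlog:
 *
 *	ret = btrfs_run_delayed_items(trans);
 *	if (ret)
 *		return ret;	(the transaction was already aborted above)
 *
 *	ret = btrfs_run_delayed_items_nr(trans, BTRFS_DELAYED_BATCH);
 */
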
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				     struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct btrfs_block_rsv *block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (!delayed_node->count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	block_rsv = trans->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	trans->block_rsv = block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
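/*
 * A hypothetical caller that needs one directory's delayed items on disk
 * before proceeding (for example before logging that directory) could use
 * the helper above instead of a full btrfs_run_delayed_items() pass:
 *
 *	ret = btrfs_commit_inode_delayed_items(trans, BTRFS_I(dir_inode));
 *	if (ret)
 *		return ret;
 */
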
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	struct btrfs_block_rsv *block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	trans = btrfs_join_transaction(delayed_node->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		ret = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		goto trans_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	block_rsv = trans->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	trans->block_rsv = &fs_info->delayed_block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 						   path, delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	trans->block_rsv = block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) trans_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
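/*
 * Unlike the helper above it, btrfs_commit_inode_delayed_inode() joins its
 * own transaction, so a caller sketch (hypothetical, e.g. an eviction-style
 * path that only needs the inode item itself persisted) is simply:
 *
 *	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
 */
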
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) void btrfs_remove_delayed_node(struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	delayed_node = READ_ONCE(inode->delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	inode->delayed_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct btrfs_async_delayed_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	struct btrfs_delayed_root *delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	int nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct btrfs_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) static void btrfs_async_run_delayed_root(struct btrfs_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct btrfs_async_delayed_work *async_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct btrfs_delayed_root *delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	struct btrfs_delayed_node *delayed_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	struct btrfs_block_rsv *block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	int total_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	delayed_root = async_work->delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		if (atomic_read(&delayed_root->items) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		    BTRFS_DELAYED_BACKGROUND / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		root = delayed_node->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		trans = btrfs_join_transaction(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			btrfs_release_prepared_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			total_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		block_rsv = trans->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		trans->block_rsv = &root->fs_info->delayed_block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		trans->block_rsv = block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		btrfs_btree_balance_dirty_nodelay(root->fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		btrfs_release_prepared_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		total_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		 || total_done < async_work->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	wake_up(&delayed_root->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	kfree(async_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				     struct btrfs_fs_info *fs_info, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct btrfs_async_delayed_work *async_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (!async_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	async_work->delayed_root = delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	async_work->nr = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	int val = atomic_read(&delayed_root->items_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
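/*
 * Worked example for could_end_wait() above: a waiter samples items_seq ==
 * 100 before kicking the async worker.  Each completed item bumps items_seq
 * via finish_one_item(), so the wait ends once items_seq reaches
 * 100 + BTRFS_DELAYED_BATCH == 116 (one batch of progress), or earlier if
 * the total item count already dropped below BTRFS_DELAYED_BACKGROUND
 * (128); val < seq covers a wrapped counter.
 */
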
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		seq = atomic_read(&delayed_root->items_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		wait_event_interruptible(delayed_root->wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 					 could_end_wait(delayed_root, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
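/*
 * The throttling policy above, in terms of the constants at the top of
 * this file:
 *
 *	items < BTRFS_DELAYED_BACKGROUND (128), or the delayed workqueue
 *	is congested:	return without queueing any work
 *
 *	items >= BTRFS_DELAYED_WRITEBACK (512):	queue an unbounded async
 *	flush (nr == 0) and wait until could_end_wait() reports enough
 *	progress
 *
 *	otherwise:	queue an async flush of BTRFS_DELAYED_BATCH (16)
 *	nodes and return immediately
 */
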
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* Will return 0 or -ENOMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 				   const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 				   struct btrfs_inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 				   struct btrfs_disk_key *disk_key, u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 				   u64 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	struct btrfs_delayed_item *delayed_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct btrfs_dir_item *dir_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	delayed_node = btrfs_get_or_create_delayed_node(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (IS_ERR(delayed_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		return PTR_ERR(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (!delayed_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		goto release_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	delayed_item->key.objectid = btrfs_ino(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	delayed_item->key.offset = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	dir_item->location = *disk_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	btrfs_set_stack_dir_data_len(dir_item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	btrfs_set_stack_dir_name_len(dir_item, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	btrfs_set_stack_dir_type(dir_item, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	memcpy((char *)(dir_item + 1), name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	 * We reserved enough space when we started a new transaction, so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	 * metadata reservation failure here is impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		btrfs_err(trans->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			  "failed to add delayed dir index item (name: %.*s) to the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			  name_len, name, delayed_node->root->root_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			  delayed_node->inode_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) release_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
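/*
 * Sketch of a caller (hypothetical; the variables are assumed to come from
 * the directory entry being created): "disk_key" is the location key of the
 * child inode and "index" is the next free dir index of the parent:
 *
 *	ret = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
 *					     &disk_key, BTRFS_FT_REG_FILE,
 *					     index);
 *	if (ret)
 *		return ret;
 */
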
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 					       struct btrfs_delayed_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 					       struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct btrfs_delayed_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	mutex_lock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	item = __btrfs_lookup_delayed_insertion_item(node, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (!item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	btrfs_delayed_item_release_metadata(node->root, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	btrfs_release_delayed_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 				   struct btrfs_inode *dir, u64 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	struct btrfs_delayed_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	struct btrfs_delayed_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	struct btrfs_key item_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	node = btrfs_get_or_create_delayed_node(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	if (IS_ERR(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		return PTR_ERR(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	item_key.objectid = btrfs_ino(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	item_key.type = BTRFS_DIR_INDEX_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	item_key.offset = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 						  &item_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	item = btrfs_alloc_delayed_item(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	if (!item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	item->key = item_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 * We reserved enough space when we started a new transaction, so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * metadata reservation failure here should be impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		btrfs_err(trans->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) "metadata reservation failed for delayed dir item deletion, should have been reserved");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		btrfs_release_delayed_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	mutex_lock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	ret = __btrfs_add_delayed_deletion_item(node, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		btrfs_err(trans->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			  "failed to add delayed dir index item (index: %llu) to the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			  index, node->root->root_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			  node->inode_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		btrfs_delayed_item_release_metadata(dir->root, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		btrfs_release_delayed_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	mutex_unlock(&node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	btrfs_release_delayed_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
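/*
 * Usage sketch, hedged: in the mainline 5.10 sources the function above is
 * btrfs_delete_delayed_dir_index(), and __btrfs_unlink_inode() in
 * fs/btrfs/inode.c drives it once the victim entry's dir index is known,
 * roughly:
 *
 *	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 *
 * The btrfs_delete_delayed_insertion_item() call above is the fast path: if
 * the matching insertion item is still queued, the two cancel out and no
 * deletion item needs to be created at all.
 */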
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	 * Since we hold i_mutex of this directory, no new directory index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	 * can be added to the delayed node and index_cnt cannot be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	 * while we are here, so we don't need to lock the delayed node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (!delayed_node->index_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	inode->index_cnt = delayed_node->index_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
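/*
 * Usage sketch, mirroring btrfs_set_inode_index() in 5.10 fs/btrfs/inode.c:
 * the cached delayed count is tried first, and only when no delayed node
 * carries an index_cnt does the caller fall back to scanning the on-disk
 * dir index:
 *
 *	if (dir->index_cnt == (u64)-1) {
 *		ret = btrfs_inode_delayed_dir_index_count(dir);
 *		if (ret) {
 *			ret = btrfs_set_inode_index_count(dir);
 *			if (ret)
 *				return ret;
 *		}
 *	}
 *	*index = dir->index_cnt;
 *	dir->index_cnt++;
 */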
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) bool btrfs_readdir_get_delayed_items(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 				     struct list_head *ins_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 				     struct list_head *del_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	struct btrfs_delayed_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	 * We can only do one readdir with delayed items at a time because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	 * item->readdir_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	item = __btrfs_first_delayed_insertion_item(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	while (item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		refcount_inc(&item->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		list_add_tail(&item->readdir_list, ins_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		item = __btrfs_next_delayed_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	item = __btrfs_first_delayed_deletion_item(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	while (item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		refcount_inc(&item->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		list_add_tail(&item->readdir_list, del_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		item = __btrfs_next_delayed_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * This delayed node is still cached in the btrfs inode, so refs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * must be > 1 here, and we don't need to check whether it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 * about to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	 * Besides that, this function is only used for readdir; we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 * insert or delete delayed items while it runs, so we don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	 * to requeue or dequeue this delayed node either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	refcount_dec(&delayed_node->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
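/*
 * The shared-to-exclusive trade above gives this lock ladder for a readdir
 * that touches delayed items (schematic, with the VFS steps filled in from
 * how ->iterate_shared() is called):
 *
 *	down_read(&inode->i_rwsem)		// VFS, before ->iterate_shared()
 *	inode_unlock_shared(inode);		// this function
 *	inode_lock(inode);			// now exclusive
 *	...emit entries...
 *	downgrade_write(&inode->i_rwsem);	// btrfs_readdir_put_delayed_items()
 *	up_read(&inode->i_rwsem)		// VFS, after ->iterate_shared()
 *
 * The exclusive section is what serializes users of item->readdir_list.
 */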
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) void btrfs_readdir_put_delayed_items(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 				     struct list_head *ins_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 				     struct list_head *del_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct btrfs_delayed_item *curr, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		list_del(&curr->readdir_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		if (refcount_dec_and_test(&curr->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		list_del(&curr->readdir_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		if (refcount_dec_and_test(&curr->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	 * The VFS is going to do up_read(), so we need to downgrade back to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	 * read lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	downgrade_write(&inode->i_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
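/*
 * Pairing sketch, modeled on btrfs_real_readdir() in 5.10 fs/btrfs/inode.c:
 * the bool returned by the get helper records whether the lists were
 * populated, and therefore whether the put helper must run on the way out:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *	bool put;
 *
 *	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 *	...walk the on-disk dir index, consulting both lists...
 *	if (put)
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */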
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int btrfs_should_delete_dir_index(struct list_head *del_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 				  u64 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	struct btrfs_delayed_item *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	list_for_each_entry(curr, del_list, readdir_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if (curr->key.offset > index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		if (curr->key.offset == index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
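/*
 * del_list is built from an rbtree walk, so it is sorted by key.offset;
 * that is what lets the loop above stop at the first offset past the one
 * being probed. A typical call site (per 5.10 btrfs_real_readdir(), named
 * here as an assumption) skips entries that have a pending deletion:
 *
 *	if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
 *		goto next;
 */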
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				    struct list_head *ins_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	struct btrfs_delayed_item *curr, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct btrfs_key location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	int name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	int over = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	unsigned char d_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	if (list_empty(ins_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	 * The data of a delayed item never changes once it is queued, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 * we don't need to lock the items. And since we hold i_mutex of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 * the directory, nobody can delete any directory index now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		list_del(&curr->readdir_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		if (curr->key.offset < ctx->pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			if (refcount_dec_and_test(&curr->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 				kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		ctx->pos = curr->key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		di = (struct btrfs_dir_item *)curr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		name = (char *)(di + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		name_len = btrfs_stack_dir_name_len(di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		d_type = fs_ftype_to_dtype(di->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		btrfs_disk_key_to_cpu(&location, &di->location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		over = !dir_emit(ctx, name, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			       location.objectid, d_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		if (refcount_dec_and_test(&curr->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		if (over)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		ctx->pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
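/*
 * Usage sketch (as in 5.10 btrfs_real_readdir()): this helper runs after the
 * on-disk dir index has been walked, so entries that so far exist only as
 * delayed insertion items are still reported to userspace:
 *
 *	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *	if (ret)
 *		goto nopos;
 */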
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				  struct btrfs_inode_item *inode_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				  struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	btrfs_set_stack_inode_generation(inode_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 					 BTRFS_I(inode)->generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	btrfs_set_stack_inode_sequence(inode_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 				       inode_peek_iversion(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	btrfs_set_stack_inode_block_group(inode_item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	btrfs_set_stack_timespec_sec(&inode_item->atime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 				     inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				      inode->i_atime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 				     inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 				      inode->i_mtime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 				     inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 				      inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	btrfs_set_stack_timespec_sec(&inode_item->otime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 				     BTRFS_I(inode)->i_otime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	btrfs_set_stack_timespec_nsec(&inode_item->otime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 				      BTRFS_I(inode)->i_otime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) int btrfs_fill_inode(struct inode *inode, u32 *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	struct btrfs_inode_item *inode_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	inode_item = &delayed_node->inode_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			round_up(i_size_read(inode), fs_info->sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	inode_set_iversion_queried(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 				   btrfs_stack_inode_sequence(inode_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	inode->i_rdev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	*rdev = btrfs_stack_inode_rdev(inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	BTRFS_I(inode)->i_otime.tv_sec =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		btrfs_stack_timespec_sec(&inode_item->otime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	BTRFS_I(inode)->i_otime.tv_nsec =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		btrfs_stack_timespec_nsec(&inode_item->otime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	inode->i_generation = BTRFS_I(inode)->generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	BTRFS_I(inode)->index_cnt = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 			       struct btrfs_root *root, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	if (IS_ERR(delayed_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		return PTR_ERR(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		goto release_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 						   delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		goto release_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	delayed_node->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	atomic_inc(&root->fs_info->delayed_root->items);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) release_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
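/*
 * Usage sketch, trimmed from 5.10 btrfs_update_inode() in fs/btrfs/inode.c:
 * the delayed path is skipped for free space inodes, the data reloc tree and
 * log recovery, where deferring the inode item update can deadlock or hit
 * enospc; everything else goes through the function above:
 *
 *	if (!btrfs_is_free_space_inode(BTRFS_I(inode))
 *	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
 *	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
 *		btrfs_update_root_times(trans, root);
 *		return btrfs_delayed_update_inode(trans, root, inode);
 *	}
 *	return btrfs_update_inode_item(trans, root, inode);
 */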
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	 * We don't do delayed inode updates during log recovery because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	 * leads to enospc problems. This means we also can't do delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 * inode refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	delayed_node = btrfs_get_or_create_delayed_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (IS_ERR(delayed_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		return PTR_ERR(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	 * We don't reserve space for inode ref deletion because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	 * - We ONLY do async inode ref deletion for an inode that has only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	 *   one link (i_nlink == 1), which means there is only one inode ref.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	 *   In most cases the inode ref and the inode item are in the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	 *   leaf, and we deal with them at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	 *   Since we are sure we reserve the space for the inode item, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	 *   unnecessary to also reserve space for the inode ref deletion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	 * - If the inode ref and the inode item are not in the same leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	 *   we still needn't worry about enospc, because we reserve much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	 *   more space for the inode update than it actually needs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	 * - At worst, we can steal some space from the global reservation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	 *   but that is very rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		goto release_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	delayed_node->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	atomic_inc(&fs_info->delayed_root->items);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) release_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
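/*
 * Fallback sketch, modeled on 5.10 __btrfs_unlink_inode(): the async path is
 * an optimization, so the -EAGAIN returned during log recovery (or any other
 * failure) just sends the caller to the synchronous btrfs_del_inode_ref():
 *
 *	if (inode->dir_index) {
 *		ret = btrfs_delayed_delete_inode_ref(inode);
 *		if (!ret) {
 *			index = inode->dir_index;
 *			goto skip_backref;
 *		}
 *	}
 *	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 *				  dir_ino, &index);
 */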
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	struct btrfs_root *root = delayed_node->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	struct btrfs_delayed_item *curr_item, *prev_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	mutex_lock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	while (curr_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		btrfs_delayed_item_release_metadata(root, curr_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		prev_item = curr_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		curr_item = __btrfs_next_delayed_item(prev_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		btrfs_release_delayed_item(prev_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	while (curr_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		btrfs_delayed_item_release_metadata(root, curr_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		prev_item = curr_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		curr_item = __btrfs_next_delayed_item(prev_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		btrfs_release_delayed_item(prev_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		btrfs_release_delayed_iref(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		btrfs_release_delayed_inode(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	mutex_unlock(&delayed_node->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	struct btrfs_delayed_node *delayed_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	delayed_node = btrfs_get_delayed_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (!delayed_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	__btrfs_kill_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	btrfs_release_delayed_node(delayed_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	u64 inode_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	struct btrfs_delayed_node *delayed_nodes[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		spin_lock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 					   (void **)delayed_nodes, inode_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 					   ARRAY_SIZE(delayed_nodes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			spin_unlock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			 * Don't increase refs in case the node is dead and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 			 * about to be removed from the tree in the loop below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 				delayed_nodes[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		spin_unlock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			if (!delayed_nodes[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			__btrfs_kill_delayed_node(delayed_nodes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			btrfs_release_delayed_node(delayed_nodes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
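/*
 * The loop above is the usual paginated gang lookup: grab up to
 * ARRAY_SIZE(delayed_nodes) entries starting at inode_id, advance inode_id
 * past the last hit, and stop once a batch comes back empty. The
 * refcount_inc_not_zero() dance is the standard weak-reference pattern that
 * makes dropping inode_lock safe:
 *
 *	if (!refcount_inc_not_zero(&node->refs))
 *		node = NULL;	// lost the race with the final put; skip it
 */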
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	struct btrfs_delayed_node *curr_node, *prev_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	while (curr_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		__btrfs_kill_delayed_node(curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		prev_node = curr_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		curr_node = btrfs_next_delayed_node(curr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		btrfs_release_delayed_node(prev_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)