// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
#include "delalloc-space.h"

static void fail_caching_thread(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	btrfs_warn(fs_info, "failed to start inode caching task");
	btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
				     "disabling inode map caching");
	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_ERROR;
	spin_unlock(&root->ino_cache_lock);
	wake_up(&root->ino_cache_wait);
}

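/*
 * Scan the commit root of this subvolume for BTRFS_INODE_ITEM_KEY items
 * and record the gaps between used inode numbers in root->free_ino_ctl.
 * The search runs under fs_info->commit_root_sem and is periodically
 * interrupted (on need_resched() or a pending transaction commit) so the
 * semaphore can be dropped; the position reached so far is published in
 * root->ino_cache_progress before the search resumes from the saved key.
 */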
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		fail_caching_thread(root);
		return -ENOMEM;
	}

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(fs_info, ctl, last + 1,
					       key.objectid - last - 1, 0);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(fs_info, ctl, last + 1,
				       root->highest_objectid - last - 1, 0);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

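/*
 * Make sure the inode number cache for this root is being populated.
 * First try to load a previously written on-disk cache; if that is not
 * available, immediately publish the range above the current highest
 * objectid (so allocations don't have to wait) and start the caching
 * kthread to fill in the holes below it.
 */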
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		wake_up(&root->ino_cache_wait);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at the start we quickly find out the
	 * highest inode number and we know we can use inode numbers which
	 * fall in [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(fs_info, ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1,
				       0);
		wake_up(&root->ino_cache_wait);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk))
		fail_caching_thread(root);
}

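/*
 * Hand out a free inode number for a new inode in this subvolume. With
 * the inode_cache mount option enabled the number comes from the free
 * ino cache (kicking off and waiting for the caching work if needed);
 * otherwise it falls back to btrfs_find_free_objectid().
 *
 * Illustrative call pattern from an inode-creation path (a sketch, not
 * a verbatim caller):
 *
 *	u64 objectid;
 *	int err;
 *
 *	err = btrfs_find_free_ino(root, &objectid);
 *	if (err)
 *		return err;
 *	use objectid as the new inode's number; if creating the inode
 *	fails, hand the number back with btrfs_return_ino(root, objectid)
 */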
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->ino_cache_state == BTRFS_CACHE_ERROR ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
		return btrfs_find_free_objectid(root, objectid);
	else
		goto again;
}

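/*
 * Give an inode number back to the cache, e.g. when inode creation
 * failed or the inode has been deleted. The number goes into the
 * "pinned" tree rather than straight into free_ino_ctl, and is only
 * moved over by btrfs_unpin_free_ino() at commit time, once it is safe
 * to reuse. If caching hasn't finished yet, commit_root_sem is taken to
 * serialize against the caching thread.
 */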
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);
	} else {
		down_write(&fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);

		up_write(&fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move those inode numbers which
 * are no greater than root->ino_cache_progress from the pinned tree to
 * the free_ino tree, and the others will just be dropped, because the
 * commit root we were searching has changed.
 *
 * Must be called with root->fs_info->commit_root_sem held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return;

	while (1) {
		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			count = 0;
		else
			count = min(root->ino_cache_progress - info->offset + 1,
				    info->bytes);

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (count)
			__btrfs_add_free_space(root->fs_info, ctl,
					       info->offset, count, 0);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_SIZE * 8)

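/*
 * INIT_THRESHOLD is the number of struct btrfs_free_space extent entries
 * that fit in 16K (SZ_32K / 2) of memory, and INODES_PER_BITMAP is the
 * number of inode numbers one page-sized bitmap can track (one bit per
 * inode number). For example, with 4 KiB pages INODES_PER_BITMAP is
 * 4096 * 8 = 32768.
 */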
/*
 * The goal is to keep the memory used by the free_ino tree from
 * exceeding the amount of memory we would use if we tracked everything
 * with bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to a bitmap if we are below the extents threshold
 * or this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

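/*
 * A rough numeric example of how the two hooks above interact (assuming
 * 4 KiB pages, so INODES_PER_BITMAP = 32768): if the last free-space
 * entry implies a max_ino of about 100000, recalculate_thresholds()
 * needs ALIGN(100000, 32768) / 32768 = 4 bitmap pages to cover that
 * range, and with one bitmap already allocated it sets extents_thresh to
 * the number of extent entries that fit in the remaining 3 pages.
 * use_bitmap() then keeps chunks as extent entries while we are below
 * that threshold, and chunks larger than INODES_PER_BITMAP / 10 (= 3276
 * inode numbers) always stay as extents.
 */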
static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds = recalculate_thresholds,
	.use_bitmap = use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds = pinned_recalc_thresholds,
	.use_bitmap = pinned_use_bitmap,
};

void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow the use of 16K of RAM to cache chunks of
	 * inode numbers before we resort to bitmaps. This is somewhat
	 * arbitrary, but it will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

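/*
 * Write the in-memory free ino cache to its cache inode at transaction
 * commit time: reserve metadata space for the update, look up (or
 * create) the free ino inode, truncate its previous contents,
 * preallocate enough data space for the current extents and bitmaps,
 * and finally write the cache out. Nothing is written unless the
 * caching work has already reached BTRFS_CACHE_FINISHED.
 */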
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	struct extent_changeset *data_reserved = NULL;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvolume/snapshot trees need the ino cache. */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save inode cache if we are deleting this root */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space if we need to do truncation
	 * 1 item for free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_SIZE;

	ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 0,
					   prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
		goto out_put;
	}

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	extent_changeset_free(data_reserved);
	return ret;
}

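/*
 * Find the highest objectid currently used in this root by looking up
 * the last key at or before (BTRFS_LAST_FREE_OBJECTID, -1, -1). For an
 * empty tree the result is BTRFS_FIRST_FREE_OBJECTID - 1, so the first
 * allocated inode number becomes BTRFS_FIRST_FREE_OBJECTID.
 */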
int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

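/*
 * Reserve the next unused objectid in this root: bump
 * root->highest_objectid under objectid_mutex and hand it out, failing
 * with -ENOSPC once BTRFS_LAST_FREE_OBJECTID is reached. This is the
 * simple path that btrfs_find_free_ino() falls back to when the
 * inode_cache option is off or the caching thread hit an error.
 */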
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}