// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi)	mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

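/*
 * Loose heuristic rather than exact accounting: each in-memory cache type
 * is allowed a fixed share of (low memory * ram_thresh%) -- one quarter
 * for FREE_NIDS and NAT_ENTRIES, one half for DIRTY_DENTS, INO_ENTRIES
 * and EXTENT_CACHE -- and this returns whether that cache may still grow.
 */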
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, and 50% of memory to each component,
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* allow up to 20% of total RAM for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}
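
/*
 * NAT blocks are kept in two on-disk copies, with a bitmap selecting the
 * live one per block.  Copy the current NAT block into the other slot,
 * dirty it, and flip the bitmap bit (set_to_next_nat) so the next
 * checkpoint writes the updated copy.
 */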
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* move a recently accessed nat entry to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
	nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}
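
/*
 * Add @ne to the dirty set of its NAT block so the change reaches disk at
 * the next checkpoint.  Entries still at NEW_ADDR (preallocated, never
 * written) are kept off the dirty sets; they only need an on-disk update
 * once they point at a real block address.
 */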
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}
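
/*
 * Track a node page written out by fsync; the returned seq_id orders
 * entries in fsync_node_list so a later writeback waiter can wait for
 * all fsync node writes up to a given sequence point.
 */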
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* caller must not hold nat_tree_lock; it is taken internally */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
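
/*
 * Record a node's new block address in the NAT cache and mark the entry
 * dirty so it is written back at the next checkpoint.  NULL_ADDR means
 * the node was freed (its version is bumped); fsync_done updates the
 * fsync flags on the owning inode's entry.
 */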
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache.  So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}
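
/*
 * Shrinker hook: evict up to nr_shrink clean nat entries from the head
 * of the LRU list and return how many were actually freed.  Uses a
 * trylock so it never stalls a concurrent holder of nat_tree_lock.
 */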
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
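
/*
 * Resolve nid -> node_info through three tiers: the in-memory NAT cache,
 * then the NAT journal kept in the hot-data curseg summary, and finally
 * the on-disk NAT block.  Journal and on-disk hits are cached for reuse.
 */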
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab
	 * journal_rwsem first.  This rwsem is on the critical path of
	 * checkpoint, which also takes the above nat_tree_lock.  So if we
	 * fail to grab it here, retry rather than stalling the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}
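
/*
 * Given a lookup that stopped at cur_level, return the first file offset
 * mapped by the *next* node at that level, so callers can skip a whole
 * missing subtree instead of probing offsets one by one.
 */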
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
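/*
 * Worked example (illustrative numbers only: assuming 4KB blocks with no
 * inline xattr or extra attrs, so direct_index = 923 and direct_blks =
 * dptrs_per_blk = 1018):
 *   block 1000: 1000 - 923 = 77 < direct_blks, so level 1 under
 *   NODE_DIR1_BLOCK with offset[1] = 77;
 *   block 5000: 5000 - 923 - 2 * 1018 = 2041 < indirect_blks, so level 2
 *   under NODE_IND1_BLOCK with offset[1] = 2041 / 1018 = 2 and
 *   offset[2] = 2041 % 1018 = 5.
 */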
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
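/*
 * mode selects the lookup policy: ALLOC_NODE allocates any missing
 * intermediate node pages along the path, while the LOOKUP_NODE variants
 * only read; LOOKUP_NODE_RA additionally readaheads the last-level node
 * page via f2fs_get_node_page_ra() when level > 1.
 */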
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct page *npage[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct page *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) int offset[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned int noffset[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) nid_t nids[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) int level, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) level = get_node_path(dn->inode, index, offset, noffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (level < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) nids[0] = dn->inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) npage[0] = dn->inode_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!npage[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) npage[0] = f2fs_get_node_page(sbi, nids[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (IS_ERR(npage[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return PTR_ERR(npage[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* if inline_data is set, should not report any block indices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (f2fs_has_inline_data(dn->inode) && index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) f2fs_put_page(npage[0], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto release_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) parent = npage[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (level != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) nids[1] = get_nid(parent, offset[0], true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dn->inode_page = npage[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dn->inode_page_locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* get indirect or direct nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) for (i = 1; i <= level; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!nids[i] && mode == ALLOC_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* alloc new node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) goto release_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) dn->nid = nids[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) npage[i] = f2fs_new_node_page(dn, noffset[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (IS_ERR(npage[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) f2fs_alloc_nid_failed(sbi, nids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) err = PTR_ERR(npage[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto release_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) set_nid(parent, offset[i - 1], nids[i], i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) f2fs_alloc_nid_done(sbi, nids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (IS_ERR(npage[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) err = PTR_ERR(npage[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) goto release_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
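		/*
		 * npage[0] is the inode page the caller still needs, so only
		 * unlock it here; deeper parents can be released outright.
		 */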
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) dn->inode_page_locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) unlock_page(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) f2fs_put_page(parent, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) npage[i] = f2fs_get_node_page(sbi, nids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (IS_ERR(npage[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) err = PTR_ERR(npage[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) f2fs_put_page(npage[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) goto release_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (i < level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) parent = npage[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) nids[i + 1] = get_nid(parent, offset[i], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dn->nid = nids[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dn->ofs_in_node = offset[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dn->node_page = npage[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dn->data_blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) release_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) f2fs_put_page(parent, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (i > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) f2fs_put_page(npage[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) release_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dn->inode_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dn->node_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) dn->cur_level = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) dn->max_level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) dn->ofs_in_node = offset[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int truncate_node(struct dnode_of_data *dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Deallocate node address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) f2fs_invalidate_blocks(sbi, ni.blk_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) set_node_addr(sbi, &ni, NULL_ADDR, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (dn->nid == dn->inode->i_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) f2fs_remove_orphan_inode(sbi, dn->nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dec_valid_inode_count(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) f2fs_inode_synced(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) clear_node_page_dirty(dn->node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) set_sbi_flag(sbi, SBI_IS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) index = dn->node_page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) f2fs_put_page(dn->node_page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) invalidate_mapping_pages(NODE_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) index, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dn->node_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
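/*
 * Free one direct node together with the data blocks it addresses.
 * Returns 1 (one node slot consumed) on success or when the node is
 * already gone, so the caller can advance its node-offset cursor;
 * returns a negative errno otherwise.
 */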
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static int truncate_dnode(struct dnode_of_data *dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (dn->nid == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* get direct node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (PTR_ERR(page) == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) else if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
	/* set up dnode_of_data as the parameter of truncate_node() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) dn->node_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dn->ofs_in_node = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) f2fs_truncate_data_blocks(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) err = truncate_node(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
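/*
 * Recursively free the node subtree rooted at dn->nid: depth == 2 frees
 * the direct nodes below an indirect node, depth == 3 walks a double
 * indirect node.  Returns the number of node slots the subtree covers
 * (including the root itself when ofs == 0), or a negative errno.
 */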
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) int ofs, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct dnode_of_data rdn = *dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct f2fs_node *rn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) nid_t child_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) unsigned int child_nofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (dn->nid == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return NIDS_PER_BLOCK + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) rn = F2FS_NODE(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (depth < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) child_nid = le32_to_cpu(rn->in.nid[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (child_nid == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rdn.nid = child_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ret = truncate_dnode(&rdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (set_nid(page, i, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) } else {
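		/*
		 * Each child subtree of a double indirect node spans
		 * NIDS_PER_BLOCK + 1 node slots: the indirect node itself
		 * plus its direct nodes.
		 */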
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) for (i = ofs; i < NIDS_PER_BLOCK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) child_nid = le32_to_cpu(rn->in.nid[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (child_nid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) child_nofs += NIDS_PER_BLOCK + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rdn.nid = child_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (ret == (NIDS_PER_BLOCK + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (set_nid(page, i, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) child_nofs += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) } else if (ret < 0 && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) freed = child_nofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!ofs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* remove current indirect node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dn->node_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ret = truncate_node(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) freed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) trace_f2fs_truncate_nodes_exit(dn->inode, freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) trace_f2fs_truncate_nodes_exit(dn->inode, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static int truncate_partial_nodes(struct dnode_of_data *dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct f2fs_inode *ri, int *offset, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct page *pages[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) nid_t nid[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) nid_t child_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int idx = depth - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
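	/*
	 * Called only with depth 2 or 3, so pages[] holds at most two levels
	 * of indirect nodes on the path; idx points at the deepest one, whose
	 * direct children from offset[idx + 1] onward are freed below.
	 */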
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!nid[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* get indirect nodes in the path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0; i < idx + 1; i++) {
		/* the page's reference count will be increased */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (IS_ERR(pages[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) err = PTR_ERR(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) idx = i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* free direct nodes linked to a partial indirect node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) child_nid = get_nid(pages[idx], i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!child_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dn->nid = child_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) err = truncate_dnode(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (set_nid(pages[idx], i, 0, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (offset[idx + 1] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dn->node_page = pages[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dn->nid = nid[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) err = truncate_node(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) f2fs_put_page(pages[idx], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) offset[idx]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) offset[idx + 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) idx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) for (i = idx; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) f2fs_put_page(pages[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
/*
 * Nullify all the block addresses of the data blocks and node blocks
 * beyond the truncation point @from.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int err = 0, cont = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int level, offset[4], noffset[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) unsigned int nofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct f2fs_inode *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) trace_f2fs_truncate_inode_blocks_enter(inode, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) level = get_node_path(inode, from, offset, noffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (level < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) trace_f2fs_truncate_inode_blocks_exit(inode, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) page = f2fs_get_node_page(sbi, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) set_new_dnode(&dn, inode, page, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ri = F2FS_INODE(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) switch (level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) nofs = noffset[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) nofs = noffset[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!offset[level - 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto skip_partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) err = truncate_partial_nodes(&dn, ri, offset, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (err < 0 && err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) nofs += 1 + NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) case 3:
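		/*
		 * 5 + 2 * NIDS_PER_BLOCK is the node-tree offset of the double
		 * indirect node: the inode, two direct nodes and two indirect
		 * subtrees precede it.
		 */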
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) nofs = 5 + 2 * NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (!offset[level - 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto skip_partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err = truncate_partial_nodes(&dn, ri, offset, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (err < 0 && err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) skip_partial:
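	/* free the remaining whole subtrees, one i_nid[] slot per iteration */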
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) while (cont) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) switch (offset[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) case NODE_DIR1_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case NODE_DIR2_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err = truncate_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case NODE_IND1_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case NODE_IND2_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) err = truncate_nodes(&dn, nofs, offset[1], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case NODE_DIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) err = truncate_nodes(&dn, nofs, offset[1], 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (err < 0 && err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (offset[1] == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) BUG_ON(page->mapping != NODE_MAPPING(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) f2fs_wait_on_page_writeback(page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) offset[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) offset[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) nofs += err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) f2fs_put_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) trace_f2fs_truncate_inode_blocks_exit(inode, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return err > 0 ? 0 : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* caller must lock inode page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int f2fs_truncate_xattr_node(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) nid_t nid = F2FS_I(inode)->i_xattr_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct page *npage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) npage = f2fs_get_node_page(sbi, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (IS_ERR(npage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return PTR_ERR(npage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) set_new_dnode(&dn, inode, NULL, npage, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) err = truncate_node(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) f2fs_put_page(npage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) f2fs_i_xnid_write(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * Caller should grab and release a rwsem by calling f2fs_lock_op() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * f2fs_unlock_op().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
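/*
 * Typical call site (sketch; from the inode eviction path, details elided):
 *
 *	f2fs_lock_op(sbi);
 *	err = f2fs_remove_inode_page(inode);
 *	f2fs_unlock_op(sbi);
 */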
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) int f2fs_remove_inode_page(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) err = f2fs_truncate_xattr_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* remove potential inline_data blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) S_ISLNK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) f2fs_truncate_data_blocks_range(&dn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
	/* i_blocks can be 0, e.g. after f2fs_new_inode() has failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
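	/* i_blocks counts 512-byte sectors; 8 is the inode's own 4KB node block */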
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) f2fs_warn(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) inode->i_ino, (unsigned long long)inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* will put inode & node pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err = truncate_node(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct page *f2fs_new_inode_page(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* allocate inode page for new inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* caller should f2fs_put_page(page, 1); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return f2fs_new_node_page(&dn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
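/*
 * Allocate a new node page whose footer offset is @ofs in the inode's node
 * tree: ofs == 0 is the inode page itself, and an xattr offset makes this
 * the inode's xattr node.  Returns a locked, dirty page or an ERR_PTR().
 */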
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct node_info new_ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return ERR_PTR(-EPERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
	err = inc_valid_node_count(sbi, dn->inode, !ofs);
	if (unlikely(err))
		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) #ifdef CONFIG_F2FS_CHECK_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dec_valid_node_count(sbi, dn->inode, !ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) new_ni.nid = dn->nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) new_ni.ino = dn->inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) new_ni.blk_addr = NULL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) new_ni.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) new_ni.version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) set_node_addr(sbi, &new_ni, NEW_ADDR, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) f2fs_wait_on_page_writeback(page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) set_cold_node(page, S_ISDIR(dn->inode->i_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (set_page_dirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (f2fs_has_xattr_block(ofs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) f2fs_i_xnid_write(dn->inode, dn->nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (ofs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) inc_valid_inode_count(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) clear_node_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
/*
 * The caller must put the page depending on the return value:
 * 0 (read I/O submitted):            f2fs_put_page(page, 0)
 * LOCKED_PAGE or error (page locked): f2fs_put_page(page, 1)
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int read_node_page(struct page *page, int op_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct f2fs_sb_info *sbi = F2FS_P_SB(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct f2fs_io_info fio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .sbi = sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .type = NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .op = REQ_OP_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .op_flags = op_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .page = page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) .encrypted_page = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (!f2fs_inode_chksum_verify(sbi, page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return -EFSBADCRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return LOCKED_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) err = f2fs_get_node_info(sbi, page->index, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (unlikely(ni.blk_addr == NULL_ADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) err = f2fs_submit_page_bio(&fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * Readahead a node page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct page *apage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (f2fs_check_nid_range(sbi, nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (apage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (!apage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) err = read_node_page(apage, REQ_RAHEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) f2fs_put_page(apage, err ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
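/*
 * Return a locked, up-to-date node page.  If the page fell out of the node
 * mapping while we waited for its read to finish, retry from scratch.
 */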
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct page *parent, int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (f2fs_check_nid_range(sbi, nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) err = read_node_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) } else if (err == LOCKED_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) goto page_hit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!f2fs_inode_chksum_verify(sbi, page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) err = -EFSBADCRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) page_hit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (unlikely(nid != nid_of_node(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) nid, nid_of_node(page), ino_of_node(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ofs_of_node(page), cpver_of_node(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) next_blkaddr_of_node(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return __get_node_page(sbi, nid, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct page *f2fs_get_node_page_ra(struct page *parent, int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) nid_t nid = get_nid(parent, start, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return __get_node_page(sbi, nid, parent, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
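/*
 * Write any dirty inline data (data page index 0) back into the inode page
 * before the node page is written out.  Best-effort only: FGP_NOWAIT makes
 * it bail out instead of blocking on a busy page.
 */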
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* should flush inline_data before evict_inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) inode = ilookup(sbi->sb, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) page = f2fs_pagecache_get_page(inode->i_mapping, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) FGP_LOCK|FGP_NOWAIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto iput_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) goto page_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!PageDirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) goto page_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!clear_page_dirty_for_io(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) goto page_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ret = f2fs_write_inline_data(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) inode_dec_dirty_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) f2fs_remove_dirty_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) page_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) iput_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
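/*
 * Find the last dirty dnode written by fsync (i.e. marked cold) for @ino and
 * return it with an extra page reference held; the caller must drop it.
 * Returns NULL if there is none, or ERR_PTR(-EIO) on a checkpoint error.
 */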
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct page *last_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) int nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) PAGECACHE_TAG_DIRTY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) f2fs_put_page(last_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!IS_DNODE(page) || !is_cold_node(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (ino_of_node(page) != ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) continue_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (ino_of_node(page) != ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (!PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /* someone wrote it for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (last_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) f2fs_put_page(last_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) last_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return last_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
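/*
 * Write back one dirty node page.  @atomic marks an fsync-driven write that
 * must reach stable media (REQ_PREFLUSH | REQ_FUA unless the "nobarrier"
 * mount option is set).  When the page cannot be written now, it is
 * redirtied and AOP_WRITEPAGE_ACTIVATE is returned so the VM skips it.
 */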
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static int __write_node_page(struct page *page, bool atomic, bool *submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct writeback_control *wbc, bool do_balance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) enum iostat_type io_type, unsigned int *seq_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct f2fs_sb_info *sbi = F2FS_P_SB(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) nid_t nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct f2fs_io_info fio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .sbi = sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .ino = ino_of_node(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .type = NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) .op = REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .op_flags = wbc_to_write_flags(wbc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) .page = page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) .encrypted_page = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) .submitted = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) .io_type = io_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) .io_wbc = wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) trace_f2fs_writepage(page, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) dec_page_count(sbi, F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) wbc->sync_mode == WB_SYNC_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) IS_DNODE(page) && is_cold_node(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* get old block addr of this node page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) nid = nid_of_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) f2fs_bug_on(sbi, page->index != nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (wbc->for_reclaim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (!f2fs_down_read_trylock(&sbi->node_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) f2fs_down_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* This page is already truncated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (unlikely(ni.blk_addr == NULL_ADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) dec_page_count(sbi, F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) f2fs_up_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (__is_valid_data_blkaddr(ni.blk_addr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) DATA_GENERIC_ENHANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) f2fs_up_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) goto redirty_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (atomic && !test_opt(sbi, NOBARRIER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
	/* add to the global fsync list before changing the page's writeback state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (f2fs_in_warm_node_list(sbi, page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) seq = f2fs_add_fsync_node_entry(sbi, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (seq_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) *seq_id = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) fio.old_blkaddr = ni.blk_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) f2fs_do_write_node_page(nid, &fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) dec_page_count(sbi, F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) f2fs_up_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (wbc->for_reclaim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) submitted = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) f2fs_submit_merged_write(sbi, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) submitted = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (submitted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) *submitted = fio.submitted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (do_balance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) f2fs_balance_fs(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) redirty_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return AOP_WRITEPAGE_ACTIVATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
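/*
 * Migrate a node page for GC. For FG_GC the page is written out
 * synchronously right here; for BG_GC it is only redirtied so that regular
 * writeback picks it up later. Returns 0, or -EAGAIN when the page could
 * not be written this time.
 */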
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) int f2fs_move_node_page(struct page *node_page, int gc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (gc_type == FG_GC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct writeback_control wbc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) .sync_mode = WB_SYNC_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) .nr_to_write = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) .for_reclaim = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) f2fs_wait_on_page_writeback(node_page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) set_page_dirty(node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (!clear_page_dirty_for_io(node_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) goto out_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (__write_node_page(node_page, false, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) &wbc, false, FS_GC_NODE_IO, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) unlock_page(node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto release_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) } else {
		/* just redirty the page; background writeback will write it out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (!PageWriteback(node_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) set_page_dirty(node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) out_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) unlock_page(node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) release_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) f2fs_put_page(node_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
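/* ->writepage callback of the node address space */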
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static int f2fs_write_node_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return __write_node_page(page, false, NULL, wbc, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) FS_NODE_IO, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
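/*
 * Write back dirty dnode pages of @ino for fsync. In atomic mode only the
 * last fsync dnode carries the fsync mark, and the scan is retried until
 * that marked page really goes to disk. The sequence id of the last queued
 * node page is handed back through @seq_id so the caller can wait for its
 * writeback later.
 */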
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) struct writeback_control *wbc, bool atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) unsigned int *seq_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct page *last_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) bool marked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) nid_t ino = inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) int nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) int nwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) last_page = last_fsync_dnode(sbi, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (IS_ERR_OR_NULL(last_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return PTR_ERR_OR_ZERO(last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) PAGECACHE_TAG_DIRTY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) bool submitted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) f2fs_put_page(last_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (!IS_DNODE(page) || !is_cold_node(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (ino_of_node(page) != ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) continue_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (ino_of_node(page) != ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (!PageDirty(page) && page != last_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /* someone wrote it for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) f2fs_wait_on_page_writeback(page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) set_fsync_mark(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) set_dentry_mark(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (!atomic || page == last_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) set_fsync_mark(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (IS_INODE(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (is_inode_flag_set(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) FI_DIRTY_INODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) f2fs_update_inode(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) set_dentry_mark(page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) f2fs_need_dentry_mark(sbi, ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* may be written by other thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (!PageDirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!clear_page_dirty_for_io(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ret = __write_node_page(page, atomic &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) page == last_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) &submitted, wbc, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) FS_NODE_IO, seq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) f2fs_put_page(last_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) } else if (submitted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) nwritten++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (page == last_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) f2fs_put_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) marked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ret || marked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (!ret && atomic && !marked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ino, last_page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) lock_page(last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) f2fs_wait_on_page_writeback(last_page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) set_page_dirty(last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) unlock_page(last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (nwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return ret ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
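/*
 * Match callback for find_inode_nowait(): grab @inode if it matches @ino
 * and still has dirty inode metadata pending; returning 1 ends the search.
 */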
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) bool clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (inode->i_ino != ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) spin_lock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) clean = list_empty(&F2FS_I(inode)->gdirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) spin_unlock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (clean)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) inode = igrab(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
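/*
 * If the inode behind this node page is still dirty, copy its in-memory
 * state into the on-disk inode page and unlock the page. Returns true when
 * an update happened, so the caller must re-lock and re-check the page.
 */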
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static bool flush_dirty_inode(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct f2fs_sb_info *sbi = F2FS_P_SB(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) nid_t ino = ino_of_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) f2fs_update_inode(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
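/*
 * Walk all dirty node pages and kick off writeback of any inline data they
 * carry, without writing out the node pages themselves.
 */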
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) pgoff_t index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) int nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) while ((nr_pages = pagevec_lookup_tag(&pvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (!IS_DNODE(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) continue_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (!PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* someone wrote it for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
			/* flush inline_data, since this is an async context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (page_private_inline(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) clear_page_private_inline(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) flush_inline_data(sbi, ino_of_node(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
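/*
 * Write back dirty node pages in three passes (see the step comment
 * below): indirect nodes first, then dentry dnodes, then file dnodes.
 * WB_SYNC_NONE callers bail out early while a WB_SYNC_ALL writer is active.
 */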
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct writeback_control *wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) bool do_balance, enum iostat_type io_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) int step = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) int nwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) int nr_pages, done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) next_step:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) bool submitted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) bool may_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
			/* give priority to WB_SYNC threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (atomic_read(&sbi->wb_sync_req[NODE]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) wbc->sync_mode == WB_SYNC_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * flushing sequence with step:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * 0. indirect nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * 1. dentry dnodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * 2. file dnodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (step == 0 && IS_DNODE(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (step == 1 && (!IS_DNODE(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) is_cold_node(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (step == 2 && (!IS_DNODE(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) !is_cold_node(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) lock_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (wbc->sync_mode == WB_SYNC_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) else if (!trylock_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) continue_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) /* someone wrote it for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
			/* flush inline_data/inode only when called from an async context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (!do_balance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) goto write_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* flush inline_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (page_private_inline(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) clear_page_private_inline(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) flush_inline_data(sbi, ino_of_node(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) goto lock_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* flush dirty inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (IS_INODE(page) && may_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) may_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (flush_dirty_inode(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) goto lock_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) write_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) f2fs_wait_on_page_writeback(page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (!clear_page_dirty_for_io(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) goto continue_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) set_fsync_mark(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) set_dentry_mark(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) ret = __write_node_page(page, false, &submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) wbc, do_balance, io_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) else if (submitted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) nwritten++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (--wbc->nr_to_write == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (wbc->nr_to_write == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) step = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (step < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) wbc->sync_mode == WB_SYNC_NONE && step == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) step++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) goto next_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (nwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) f2fs_submit_merged_write(sbi, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (unlikely(f2fs_cp_error(sbi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
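/*
 * Wait for writeback of the fsync node pages queued up to @seq_id and
 * report any write error found on them or on the node mapping.
 */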
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) unsigned int seq_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct fsync_node_entry *fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct list_head *head = &sbi->fsync_node_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) unsigned int cur_seq_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int ret2, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) while (seq_id && cur_seq_id < seq_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) spin_lock_irqsave(&sbi->fsync_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) fn = list_first_entry(head, struct fsync_node_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (fn->seq_id > seq_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) cur_seq_id = fn->seq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) page = fn->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) f2fs_wait_on_page_writeback(page, NODE, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (TestClearPageError(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ret2 = filemap_check_errors(NODE_MAPPING(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
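/* ->writepages callback of the node address space */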
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static int f2fs_write_node_pages(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) long diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto skip_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
	/* balance f2fs metadata in the background */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) f2fs_balance_fs_bg(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
	/* collect a number of dirty node pages and write them together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (wbc->sync_mode != WB_SYNC_ALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) get_pages(sbi, F2FS_DIRTY_NODES) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) nr_pages_to_skip(sbi, NODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) goto skip_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (wbc->sync_mode == WB_SYNC_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) atomic_inc(&sbi->wb_sync_req[NODE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) else if (atomic_read(&sbi->wb_sync_req[NODE])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* to avoid potential deadlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (current->plug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) blk_finish_plug(current->plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) goto skip_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) trace_f2fs_writepages(mapping->host, wbc, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) diff = nr_pages_to_write(sbi, NODE, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (wbc->sync_mode == WB_SYNC_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) atomic_dec(&sbi->wb_sync_req[NODE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) skip_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) trace_f2fs_writepages(mapping->host, wbc, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
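/* ->set_page_dirty callback of the node address space */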
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static int f2fs_set_node_page_dirty(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) trace_f2fs_set_page_dirty(page, NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) #ifdef CONFIG_F2FS_CHECK_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (IS_INODE(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) f2fs_inode_chksum_set(F2FS_P_SB(page), page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) set_page_private_reference(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * Structure of the f2fs node operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) const struct address_space_operations f2fs_node_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .writepage = f2fs_write_node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .writepages = f2fs_write_node_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) .set_page_dirty = f2fs_set_node_page_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) .invalidatepage = f2fs_invalidate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) .releasepage = f2fs_release_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) #ifdef CONFIG_MIGRATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) .migratepage = f2fs_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
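/*
 * Free nid cache: candidate nids live in a radix tree plus the FREE_NID
 * list; a nid moves to PREALLOC_NID state while an allocation is in flight.
 */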
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) nid_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) return radix_tree_lookup(&nm_i->free_nid_root, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static int __insert_free_nid(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) struct free_nid *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) nm_i->nid_cnt[FREE_NID]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) list_add_tail(&i->list, &nm_i->free_nid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) static void __remove_free_nid(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct free_nid *i, enum nid_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) f2fs_bug_on(sbi, state != i->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) nm_i->nid_cnt[state]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (state == FREE_NID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) list_del(&i->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) radix_tree_delete(&nm_i->free_nid_root, i->nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) enum nid_state org_state, enum nid_state dst_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) f2fs_bug_on(sbi, org_state != i->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) i->state = dst_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) nm_i->nid_cnt[org_state]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) nm_i->nid_cnt[dst_state]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) switch (dst_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) case PREALLOC_NID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) list_del(&i->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) case FREE_NID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) list_add_tail(&i->list, &nm_i->free_nid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
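/*
 * Track the free/allocated state of @nid in the per-NAT-block free nid
 * bitmap; this is a no-op until the matching NAT block has been scanned.
 */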
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) bool set, bool build)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) unsigned int nid_ofs = nid - START_NID(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) nm_i->free_nid_count[nat_ofs]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!build)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) nm_i->free_nid_count[nat_ofs]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
/* return true if the nid is recognized as free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) static bool add_free_nid(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) nid_t nid, bool build, bool update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) struct free_nid *i, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct nat_entry *ne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* 0 nid should not be used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (unlikely(nid == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (unlikely(f2fs_check_nid_range(sbi, nid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) i->nid = nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) i->state = FREE_NID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (build) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * Thread A Thread B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * - f2fs_create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * - f2fs_new_inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * - f2fs_alloc_nid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * - __insert_nid_to_list(PREALLOC_NID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * - f2fs_balance_fs_bg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * - f2fs_build_free_nids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * - __f2fs_build_free_nids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * - scan_nat_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * - add_free_nid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) * - __lookup_nat_cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * - f2fs_add_link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * - f2fs_init_inode_metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * - f2fs_new_inode_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * - f2fs_new_node_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * - set_node_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * - f2fs_alloc_nid_done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * - __remove_nid_from_list(PREALLOC_NID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * - __insert_nid_to_list(FREE_NID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) ne = __lookup_nat_cache(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) nat_get_blkaddr(ne) != NULL_ADDR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) e = __lookup_free_nid_list(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (e->state == FREE_NID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) err = __insert_free_nid(sbi, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) update_free_nid_bitmap(sbi, nid, ret, build);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (!build)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) nm_i->available_nids++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) radix_tree_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
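/* drop @nid from the free nid cache, e.g. once it turns out to be in use */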
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct free_nid *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) bool need_free = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) i = __lookup_free_nid_list(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (i && i->state == FREE_NID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) __remove_free_nid(sbi, i, FREE_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) need_free = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (need_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
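/*
 * Scan one NAT block starting at @start_nid and record every entry with a
 * NULL_ADDR block address as a free nid candidate. A NEW_ADDR entry means
 * the NAT is corrupted, so -EINVAL is returned.
 */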
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static int scan_nat_page(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) struct page *nat_page, nid_t start_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct f2fs_nat_block *nat_blk = page_address(nat_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) block_t blk_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) i = start_nid % NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (unlikely(start_nid >= nm_i->max_nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (blk_addr == NEW_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (blk_addr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) add_free_nid(sbi, start_nid, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) spin_lock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) update_free_nid_bitmap(sbi, start_nid, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) spin_unlock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
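/*
 * Pick up nids whose latest state only lives in the current summary
 * journal: NULL_ADDR entries become free nid candidates, the others are
 * dropped from the free nid cache.
 */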
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) static void scan_curseg_cache(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct f2fs_journal *journal = curseg->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) down_read(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) for (i = 0; i < nats_in_cursum(journal); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) block_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) nid_t nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) nid = le32_to_cpu(nid_in_journal(journal, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (addr == NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) add_free_nid(sbi, nid, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) remove_free_nid(sbi, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) up_read(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
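/*
 * Refill the free nid cache from the in-memory free nid bitmaps of already
 * scanned NAT blocks, stopping once MAX_FREE_NIDS entries are cached.
 */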
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) unsigned int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) nid_t nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) f2fs_down_read(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) for (i = 0; i < nm_i->nat_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (!test_bit_le(i, nm_i->nat_block_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (!nm_i->free_nid_count[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) NAT_ENTRY_PER_BLOCK, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (idx >= NAT_ENTRY_PER_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) nid = i * NAT_ENTRY_PER_BLOCK + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) add_free_nid(sbi, nid, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) scan_curseg_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) f2fs_up_read(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
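/*
 * Build the free nid cache: try the free nid bitmaps first (except while
 * mounting), then scan up to FREE_NID_PAGES NAT pages from next_scan_nid,
 * and finally fold in the summary journal via scan_curseg_cache().
 */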
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) bool sync, bool mount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) int i = 0, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) nid_t nid = nm_i->next_scan_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (unlikely(nid >= nm_i->max_nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) /* Enough entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (!mount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /* try to find free nids in free_nid_bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) scan_free_nid_bits(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) /* readahead nat pages to be scanned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) META_NAT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) f2fs_down_read(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) nm_i->nat_block_bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) struct page *page = get_current_nat_page(sbi, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) ret = PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ret = scan_nat_page(sbi, page, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) f2fs_up_read(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (unlikely(nid >= nm_i->max_nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (++i >= FREE_NID_PAGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
	/* continue from the next NAT page on a later scan to find more free nids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) nm_i->next_scan_nid = nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /* find free nids from current sum_pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) scan_curseg_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) f2fs_up_read(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) nm_i->ra_nid_pages, META_NAT, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
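/* serialize free nid building among concurrent callers */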
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) mutex_lock(&NM_I(sbi)->build_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ret = __f2fs_build_free_nids(sbi, sync, mount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) mutex_unlock(&NM_I(sbi)->build_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) * If this function returns success, the caller can obtain a new nid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * from the second parameter of this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) * The returned nid can be used as an ino as well as a nid when an inode is created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) struct free_nid *i = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) if (unlikely(nm_i->available_nids == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /* We should not use stale free nids created by f2fs_build_free_nids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) i = list_first_entry(&nm_i->free_nid_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) struct free_nid, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) *nid = i->nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
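^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /* park the nid on the PREALLOC list until alloc_nid_done/failed */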
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) nm_i->available_nids--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) update_free_nid_bitmap(sbi, *nid, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) /* Let's scan the nat pages and their caches to get free nids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (!f2fs_build_free_nids(sbi, true, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * f2fs_alloc_nid() should be called prior to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) struct free_nid *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) i = __lookup_free_nid_list(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) f2fs_bug_on(sbi, !i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) __remove_free_nid(sbi, i, PREALLOC_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) * f2fs_alloc_nid() should be called prior to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) struct free_nid *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) bool need_free = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (!nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) i = __lookup_free_nid_list(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) f2fs_bug_on(sbi, !i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) __remove_free_nid(sbi, i, PREALLOC_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) need_free = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) nm_i->available_nids++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) update_free_nid_bitmap(sbi, nid, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (need_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) int nr = nr_shrink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (!mutex_trylock(&nm_i->build_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
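^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) * Drop at most SHRINK_NID_BATCH_SIZE nids per pass, releasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) * nid_list_lock between passes to keep the critical section short.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) */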
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct free_nid *i, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) unsigned int batch = SHRINK_NID_BATCH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (!nr_shrink || !batch ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) __remove_free_nid(sbi, i, FREE_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) nr_shrink--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) batch--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) mutex_unlock(&nm_i->build_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return nr - nr_shrink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) void *src_addr, *dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) size_t inline_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct page *ipage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) struct f2fs_inode *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (IS_ERR(ipage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return PTR_ERR(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) ri = F2FS_INODE(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (ri->i_inline & F2FS_INLINE_XATTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (!f2fs_has_inline_xattr(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) set_inode_flag(inode, FI_INLINE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) stat_inc_inline_xattr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (f2fs_has_inline_xattr(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) stat_dec_inline_xattr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) clear_inode_flag(inode, FI_INLINE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) goto update_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) dst_addr = inline_xattr_addr(inode, ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) src_addr = inline_xattr_addr(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) inline_size = inline_xattr_size(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) f2fs_wait_on_page_writeback(ipage, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) memcpy(dst_addr, src_addr, inline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) update_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) f2fs_update_inode(inode, ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) f2fs_put_page(ipage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) nid_t new_xnid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) struct page *xpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (!prev_xnid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) goto recover_xnid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) /* 1: invalidate the previous xattr nid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) f2fs_invalidate_blocks(sbi, ni.blk_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) dec_valid_node_count(sbi, inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) set_node_addr(sbi, &ni, NULL_ADDR, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) recover_xnid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /* 2: update xattr nid in inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if (!f2fs_alloc_nid(sbi, &new_xnid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (IS_ERR(xpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) f2fs_alloc_nid_failed(sbi, new_xnid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return PTR_ERR(xpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) f2fs_alloc_nid_done(sbi, new_xnid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) f2fs_update_inode_page(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) /* 3: update and set xattr node page dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) set_page_dirty(xpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) f2fs_put_page(xpage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct f2fs_inode *src, *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) nid_t ino = ino_of_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) struct node_info old_ni, new_ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) struct page *ipage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) err = f2fs_get_node_info(sbi, ino, &old_ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (unlikely(old_ni.blk_addr != NULL_ADDR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (!ipage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /* This ino must not be reused from the free nid list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) remove_free_nid(sbi, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (!PageUptodate(ipage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) SetPageUptodate(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) fill_node_footer(ipage, ino, ino, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) set_cold_node(ipage, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) src = F2FS_INODE(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) dst = F2FS_INODE(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
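^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * Copy the inode metadata up to i_ext only, and reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) * remaining fields to a consistent initial state for recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) */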
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) dst->i_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) dst->i_blocks = cpu_to_le64(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) dst->i_links = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) dst->i_xattr_nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (dst->i_inline & F2FS_EXTRA_ATTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) dst->i_extra_isize = src->i_extra_isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) i_inline_xattr_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) dst->i_inline_xattr_size = src->i_inline_xattr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) if (f2fs_sb_has_project_quota(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) i_projid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) dst->i_projid = src->i_projid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (f2fs_sb_has_inode_crtime(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) i_crtime_nsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) dst->i_crtime = src->i_crtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) dst->i_crtime_nsec = src->i_crtime_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) new_ni = old_ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) new_ni.ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (unlikely(inc_valid_node_count(sbi, NULL, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) set_node_addr(sbi, &new_ni, NEW_ADDR, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) inc_valid_inode_count(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) set_page_dirty(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) f2fs_put_page(ipage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) unsigned int segno, struct f2fs_summary_block *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) struct f2fs_node *rn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) struct f2fs_summary *sum_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) block_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int i, idx, last_offset, nrpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /* scan the node segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) last_offset = sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) addr = START_BLOCK(sbi, segno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) sum_entry = &sum->entries[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) nrpages = min(last_offset - i, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) /* readahead node pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) for (idx = addr; idx < addr + nrpages; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) struct page *page = f2fs_get_tmp_page(sbi, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) rn = F2FS_NODE(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) sum_entry->nid = rn->footer.nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) sum_entry->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) sum_entry->ofs_in_node = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) sum_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
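^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) /* the temporary meta pages are no longer needed once summarized */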
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) invalidate_mapping_pages(META_MAPPING(sbi), addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) addr + nrpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
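^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * Move all nat entries out of the curseg journal into the nat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) * cache, marking them dirty so they are written back to NAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) * pages at the next flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) */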
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) struct f2fs_journal *journal = curseg->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) down_write(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) for (i = 0; i < nats_in_cursum(journal); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) struct nat_entry *ne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) struct f2fs_nat_entry raw_ne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (f2fs_check_nid_range(sbi, nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) raw_ne = nat_in_journal(journal, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) ne = __lookup_nat_cache(nm_i, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (!ne) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) ne = __alloc_nat_entry(nid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) __init_nat_entry(nm_i, ne, &raw_ne, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) * If a free nat entry in the journal has not been used since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * last checkpoint, remove it from the available nids now, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * it will be added back when the entry is flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) if (!get_nat_flag(ne, IS_DIRTY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) nm_i->available_nids--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) __set_nat_cache_dirty(nm_i, ne);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) update_nats_in_cursum(journal, -i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) up_write(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
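^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * Insert @nes into @head, which is kept sorted by ascending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * entry_cnt; sets with at least @max entries (too big for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * journal) simply go to the tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) */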
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) static void __adjust_nat_entry_set(struct nat_entry_set *nes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) struct list_head *head, int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) struct nat_entry_set *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (nes->entry_cnt >= max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) goto add_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) list_for_each_entry(cur, head, set_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (cur->entry_cnt >= nes->entry_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) list_add(&nes->set_list, cur->set_list.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) add_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) list_add_tail(&nes->set_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) struct f2fs_nat_block *nat_blk = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) int valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (!enabled_nat_bits(sbi, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
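^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /* nid 0 is never allocated; count it as valid so block 0 can still be full */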
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) if (nat_index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) for (; i < NAT_ENTRY_PER_BLOCK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) valid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (valid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) __set_bit_le(nat_index, nm_i->empty_nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) __clear_bit_le(nat_index, nm_i->full_nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) __clear_bit_le(nat_index, nm_i->empty_nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (valid == NAT_ENTRY_PER_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) __set_bit_le(nat_index, nm_i->full_nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) __clear_bit_le(nat_index, nm_i->full_nat_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) struct nat_entry_set *set, struct cp_control *cpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) struct f2fs_journal *journal = curseg->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) bool to_journal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) struct f2fs_nat_block *nat_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) struct nat_entry *ne, *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) * there are two steps to flush nat entries:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * #1, flush nat entries to journal in current hot data summary block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) * #2, flush nat entries to nat page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (enabled_nat_bits(sbi, cpc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) to_journal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (to_journal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) down_write(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) page = get_next_nat_page(sbi, start_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) nat_blk = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) f2fs_bug_on(sbi, !nat_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) /* flush dirty nats in nat entry set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) struct f2fs_nat_entry *raw_ne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) nid_t nid = nat_get_nid(ne);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) if (to_journal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) offset = f2fs_lookup_journal_in_cursum(journal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) NAT_JOURNAL, nid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) f2fs_bug_on(sbi, offset < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) raw_ne = &nat_in_journal(journal, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) nid_in_journal(journal, offset) = cpu_to_le32(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) raw_ne = &nat_blk->entries[nid - start_nid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) raw_nat_from_node_info(raw_ne, &ne->ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) nat_reset_flag(ne);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) __clear_nat_cache_dirty(NM_I(sbi), set, ne);
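^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) /* a nat entry flushed as NULL_ADDR means the nid is free again */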
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) if (nat_get_blkaddr(ne) == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) add_free_nid(sbi, nid, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) spin_lock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) update_free_nid_bitmap(sbi, nid, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) spin_unlock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) if (to_journal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) up_write(&curseg->journal_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) __update_nat_bits(sbi, start_nid, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) /* Allow dirty nats by node block allocation in write_begin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (!set->entry_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) kmem_cache_free(nat_entry_set_slab, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) * This function is called during the checkpointing process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) struct f2fs_journal *journal = curseg->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) struct nat_entry_set *setvec[SETVEC_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) struct nat_entry_set *set, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) unsigned int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) nid_t set_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) LIST_HEAD(sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) * during unmount, let's flush nat_bits before checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) * nat_cnt[DIRTY_NAT].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (enabled_nat_bits(sbi, cpc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) f2fs_down_write(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) remove_nats_in_journal(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) f2fs_up_write(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (!nm_i->nat_cnt[DIRTY_NAT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) f2fs_down_write(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) * If there is not enough space in the journal to store the dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) * nat entries, remove all entries from the journal and merge them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) * into the nat entry sets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (enabled_nat_bits(sbi, cpc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) !__has_cursum_space(journal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) remove_nats_in_journal(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
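^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) * Collect all dirty nat sets sorted by entry count, so that small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) * sets are flushed to the journal first while space remains and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * large sets go straight to NAT pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) */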
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) while ((found = __gang_lookup_nat_set(nm_i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) set_idx, SETVEC_SIZE, setvec))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) set_idx = setvec[found - 1]->set + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) for (idx = 0; idx < found; idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) __adjust_nat_entry_set(setvec[idx], &sets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) MAX_NAT_JENTRIES(journal));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) /* flush dirty nats in nat entry set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) list_for_each_entry_safe(set, tmp, &sets, set_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) err = __flush_nat_entry_set(sbi, set, cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) f2fs_up_write(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) /* Allow dirty nats by node block allocation in write_begin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) __u64 cp_ver = cur_cp_version(ckpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) block_t nat_bits_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (!enabled_nat_bits(sbi, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
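^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) * nat_bits layout: an 8-byte checkpoint version header followed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) * by the full_nat_bits and empty_nat_bits arrays, each holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) * one bit per NAT block; hence (nat_bits_bytes << 1) + 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) */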
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) nm_i->nat_bits = f2fs_kvzalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (!nm_i->nat_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) nm_i->nat_bits_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) for (i = 0; i < nm_i->nat_bits_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) page = f2fs_get_meta_page(sbi, nat_bits_addr++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) page_address(page), F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
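^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) * The stored version must match the current checkpoint version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) * and CRC; otherwise the nat_bits are stale and must be ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) */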
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) cp_ver |= (cur_cp_crc(ckpt) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) disable_nat_bits(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) nm_i->full_nat_bits = nm_i->nat_bits + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) f2fs_notice(sbi, "Found nat_bits in checkpoint");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) nid_t nid, last_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) if (!enabled_nat_bits(sbi, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
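^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * Every nid in an empty NAT block is free, so the free nid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * bitmap can be populated without reading the block itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) */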
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) for (i = 0; i < nm_i->nat_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (i >= nm_i->nat_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) __set_bit_le(i, nm_i->nat_block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) nid = i * NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) last_nid = nid + NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) spin_lock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) for (; nid < last_nid; nid++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) update_free_nid_bitmap(sbi, nid, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) spin_unlock(&NM_I(sbi)->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
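^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) /* full NAT blocks hold no free nids, so just mark them as scanned */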
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) for (i = 0; i < nm_i->nat_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (i >= nm_i->nat_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) __set_bit_le(i, nm_i->nat_block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) static int init_node_manager(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) unsigned char *version_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) unsigned int nat_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) /* segment_count_nat includes the pair segment, so divide it by 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) /* not used nids: 0, node, meta, (and root counted as valid node) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) F2FS_RESERVED_NODE_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) nm_i->nid_cnt[FREE_NID] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) nm_i->nid_cnt[PREALLOC_NID] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) nm_i->ram_thresh = DEF_RAM_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) INIT_LIST_HEAD(&nm_i->free_nid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) INIT_LIST_HEAD(&nm_i->nat_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) spin_lock_init(&nm_i->nat_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) mutex_init(&nm_i->build_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) spin_lock_init(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) init_f2fs_rwsem(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) if (!nm_i->nat_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) err = __get_nat_bitmaps(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) #ifdef CONFIG_F2FS_CHECK_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (!nm_i->nat_bitmap_mir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) static int init_free_nid_cache(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) nm_i->free_nid_bitmap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) nm_i->nat_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (!nm_i->free_nid_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) for (i = 0; i < nm_i->nat_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) if (!nm_i->free_nid_bitmap[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) if (!nm_i->nat_block_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) nm_i->free_nid_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) nm_i->nat_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (!nm_i->free_nid_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if (!sbi->nm_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) err = init_node_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) err = init_free_nid_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) /* load free nid status from nat_bits table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) load_free_nid_bitmap(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return f2fs_build_free_nids(sbi, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) struct f2fs_nm_info *nm_i = NM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) struct free_nid *i, *next_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) struct nat_entry *natvec[NATVEC_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) struct nat_entry_set *setvec[SETVEC_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) nid_t nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) unsigned int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (!nm_i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) /* destroy free nid list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) spin_lock(&nm_i->nid_list_lock);
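^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) /* release the lock around kmem_cache_free() for each entry */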
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) __remove_free_nid(sbi, i, FREE_NID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) kmem_cache_free(free_nid_slab, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) spin_lock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) spin_unlock(&nm_i->nid_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) /* destroy nat cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) f2fs_down_write(&nm_i->nat_tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) while ((found = __gang_lookup_nat_cache(nm_i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) nid, NATVEC_SIZE, natvec))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) nid = nat_get_nid(natvec[found - 1]) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) for (idx = 0; idx < found; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) spin_lock(&nm_i->nat_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) list_del(&natvec[idx]->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) spin_unlock(&nm_i->nat_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) __del_from_nat_cache(nm_i, natvec[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /* destroy nat set cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero only when cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

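	/*
	 * free_nid_bitmap is an array holding one bitmap per NAT block,
	 * so each per-block bitmap is freed before the array itself.
	 */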
	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

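/*
 * Create the slab caches used by the node manager. On failure, the
 * caches created so far are unwound in reverse order through the goto
 * ladder below, so the caller sees all-or-nothing.
 */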
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

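/*
 * Destroy the node manager slab caches in the reverse order of their
 * creation.
 */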
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}