// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

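		/*
		 * Sleep for wait_ms, but wake up early on a kthread stop
		 * request, a freezer event, a foreground GC waiter on
		 * fggc_wq, or an explicit gc_wake request.
		 */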
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait some time to let
		 * more dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

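	/* a user-configured gc_mode (e.g. set via sysfs) overrides the default */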
	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

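	/*
	 * Both SSR modes select victims greedily, one segment at a time;
	 * LFS works on whole sections using the policy from select_gc_type().
	 */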
	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range: all dirty segments should be
	 * scanned in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
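	/*
	 * A greedy victim's cost is its valid block count, at most one
	 * full section (blocks_per_seg * ofs_unit).  Starting min_cost at
	 * twice that means even a completely valid section still passes
	 * the "cost < min_cost" check and can be selected.
	 */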
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

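	/*
	 * Classic cost-benefit cleaning: benefit = age * (100 - u) and
	 * cost = 100 + u (read the whole section, rewrite the u% that is
	 * still valid), with u the section utilization in percent.  The
	 * ratio is subtracted from UINT_MAX so that a smaller return
	 * value means a better victim.
	 */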
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

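	/*
	 * accu is a fixed-point scaling factor: bounding it by
	 * ULLONG_MAX / total_time keeps accu * (max_mtime - ve->mtime)
	 * from overflowing, and the DEFAULT_ACCURACY_CLASS cap keeps the
	 * weighted age + u sum well below UINT_MAX (checked below).
	 */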
	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * Select candidates around the source section, in the range of
 * [target - dirty_threshold, target + dirty_threshold].
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
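	/*
	 * Two-stage scan around the central node closest to the target
	 * age: stage 0 walks toward smaller mtimes (rb_prev), stage 1
	 * restarts from the center and walks toward larger mtimes
	 * (rb_next), examining up to dirty_threshold candidates each.
	 */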
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

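	/*
	 * Linear scan of the dirty bitmap in ofs_unit granularity (one
	 * segment for SSR, one section for LFS).  When the scan reaches
	 * the end, it wraps around once to cover [0, last_victim) so
	 * that successive calls sweep the whole main area.
	 */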
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * Skip an invalid segno (one that previously failed the
		 * block validity check during GC) to avoid an endless GC
		 * loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section
				 * during GC; the victim should have no
				 * checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be filled
				 * by checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static const struct victim_selection default_v_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) .get_victim = get_victim_by_default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct inode_entry *ie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ie = radix_tree_lookup(&gc_list->iroot, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (ie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return ie->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
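/*
 * Cache the inode for this GC round. If it is already tracked, drop the
 * extra reference taken by the caller; otherwise keep the reference until
 * put_gc_inode() tears the whole list down.
 */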
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

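/*
 * Test the SIT valid-block bitmap under sentry_lock: a non-zero return
 * means the block at (segno, offset) still holds live data and must be
 * migrated.
 */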
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match, the node block is still live and is copied
 * with cold status; otherwise the stale entry is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

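	/*
	 * Walk the segment in three phases to keep the I/O sequential:
	 * phase 0 reads ahead the NAT blocks, phase 1 reads ahead the node
	 * pages themselves, and phase 2 re-validates each block and moves
	 * it via f2fs_move_node_page().
	 */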
next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: callers must pass a node offset that refers to a direct node
 * block. Passing an offset that points to any other node block type, such
 * as an indirect or double-indirect node block, is a caller bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

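/*
 * Decide whether a data block is still live: re-read its owning node page
 * and check that the address recorded there for this slot still matches
 * the block being garbage collected. A node version mismatch marks the
 * filesystem for fsck, since the summary then references a stale node.
 */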
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}

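/*
 * Read one data block ahead into META_MAPPING. This path is used for
 * inodes that require post-read processing, where GC must copy the raw
 * on-disk block instead of going through the regular data path.
 */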
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, i.e. LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out the data page; the copy is done via META_MAPPING */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

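/*
 * Migrate a plaintext data block through the regular data path. For BG_GC
 * the page is only redirtied and tagged as under GC so that ordinary
 * writeback migrates it; for FG_GC it is written out synchronously here.
 */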
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function fetches the parent node of the victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated accordingly.
 * If the parent node is not valid or the recorded data block address
 * differs, the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

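	/*
	 * Five phases, mirroring gc_node_segment(): phase 0 reads ahead the
	 * NAT blocks, phase 1 the parent node pages, phase 2 the inodes;
	 * phase 3 grabs the inodes and reads ahead their data, and phase 4
	 * actually moves the data blocks.
	 */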
	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment has become fully valid again,
		 * which can happen when SSR block allocation races with GC.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
				(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get the inode by ino and check its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all in-flight AIO data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

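/*
 * Pick the next victim segment for cleaning. sentry_lock is taken for
 * write so that victim selection cannot race with SIT updates.
 */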
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

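/*
 * Garbage collect one section: migrate every segment in
 * [start_segno, end_segno) and return the number of segments actually
 * freed.
 */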
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * On zoned devices the zone capacity can be smaller than the zone
	 * size, leaving fewer usable segments in the zone than expected,
	 * so recompute the last segno in the zone that can be garbage
	 * collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) while (segno < end_segno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) sum_page = f2fs_get_sum_page(sbi, segno++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (IS_ERR(sum_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int err = PTR_ERR(sum_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) end_segno = segno - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) for (segno = start_segno; segno < end_segno; segno++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) sum_page = find_get_page(META_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) GET_SUM_BLOCK(sbi, segno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) f2fs_put_page(sum_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) f2fs_put_page(sum_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) unlock_page(sum_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) for (segno = start_segno; segno < end_segno; segno++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* find segment summary of victim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) sum_page = find_get_page(META_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) GET_SUM_BLOCK(sbi, segno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) f2fs_put_page(sum_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (get_valid_blocks(sbi, segno, false) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) goto freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (gc_type == BG_GC && __is_large_section(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) migrated >= sbi->migration_granularity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) sum = page_address(sum_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (type != GET_SUM_TYPE((&sum->footer))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) segno, type, GET_SUM_TYPE((&sum->footer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
		/*
		 * this is to avoid a deadlock between two lock chains:
		 *  GC path:                     f2fs_replace_block path:
		 *  - lock_page(sum_page)        - down_write(sentry_lock)
		 *   - check_valid_map()          - change_curseg()
		 *    - down_read(sentry_lock)     - lock_page(sum_page)
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (type == SUM_TYPE_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) submitted += gc_node_segment(sbi, sum->entries, segno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) gc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) submitted += gc_data_segment(sbi, sum->entries, gc_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) segno, gc_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) force_migrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) stat_inc_seg_count(sbi, type, gc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) sbi->gc_reclaimed_segs[sbi->gc_mode]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) migrated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) freed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (gc_type == FG_GC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) get_valid_blocks(sbi, segno, false) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) seg_freed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
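		/*
		 * In large-section mode a section spans several segments and
		 * migration may stop early (see the migration_granularity
		 * check above), so remember where the next pass should resume.
		 */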
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (__is_large_section(sbi) && segno + 1 < end_segno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) sbi->next_victim_seg[gc_type] = segno + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) f2fs_put_page(sum_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
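	/* submit the pages queued by the segment moves above as one merged write */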
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (submitted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) f2fs_submit_merged_write(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) (type == SUM_TYPE_NODE) ? NODE : DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) stat_inc_call_count(sbi->stat_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return seg_freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) bool background, bool force, unsigned int segno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) int gc_type = sync ? FG_GC : BG_GC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) int sec_freed = 0, seg_freed = 0, total_freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct cp_control cpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) unsigned int init_segno = segno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct gc_inode_list gc_list = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) .ilist = LIST_HEAD_INIT(gc_list.ilist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) unsigned long long first_skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) unsigned int skipped_round = 0, round = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) trace_f2fs_gc_begin(sbi->sb, sync, background,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) get_pages(sbi, F2FS_DIRTY_NODES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) get_pages(sbi, F2FS_DIRTY_DENTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) get_pages(sbi, F2FS_DIRTY_IMETA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) free_sections(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) free_segments(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) reserved_segments(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) prefree_segments(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) cpc.reason = __get_cp_reason(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) sbi->skipped_gc_rwsem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) first_skipped = last_skipped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) gc_more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. That secures free segments, so foreground GC
		 * is no longer needed.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (prefree_segments(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ret = f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (has_not_enough_free_secs(sbi, 0, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) gc_type = FG_GC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (gc_type == BG_GC && !background) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) ret = __get_victim(sbi, &segno, gc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
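	/*
	 * Collect one victim section. It only counts as a freed section if
	 * every usable segment in it ended up empty; on zoned devices
	 * f2fs_usable_segs_in_sec() may be smaller than segs_per_sec.
	 */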
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (gc_type == FG_GC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) sec_freed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) total_freed += seg_freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (gc_type == FG_GC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) sbi->skipped_gc_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) skipped_round++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) last_skipped = sbi->skipped_atomic_files[FG_GC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) round++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (gc_type == FG_GC && seg_freed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) sbi->cur_victim_sec = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) goto stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
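	/*
	 * Retry while progress still looks likely: either only a few rounds
	 * were skipped overall, or skips cover less than half of all FG_GC
	 * rounds. If the skips were dominated by atomic files rather than
	 * rwsem contention, drop their in-memory pages before retrying.
	 */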
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (skipped_round <= MAX_SKIP_GC_COUNT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) skipped_round * 2 < round) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) segno = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) goto gc_more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (first_skipped < last_skipped &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) (last_skipped - first_skipped) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) sbi->skipped_gc_rwsem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) f2fs_drop_inmem_pages_all(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) segno = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto gc_more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ret = f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) get_pages(sbi, F2FS_DIRTY_NODES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) get_pages(sbi, F2FS_DIRTY_DENTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) get_pages(sbi, F2FS_DIRTY_IMETA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) free_sections(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) free_segments(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) reserved_segments(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) prefree_segments(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) f2fs_up_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) put_gc_inode(&gc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
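	/* a sync caller wants a whole free section; -EAGAIN if none was reclaimed */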
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (sync && !ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) ret = sec_freed ? 0 : -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) int __init f2fs_create_garbage_collection_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) sizeof(struct victim_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (!victim_entry_slab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) void f2fs_destroy_garbage_collection_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) kmem_cache_destroy(victim_entry_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static void init_atgc_management(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct atgc_management *am = &sbi->am;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (test_opt(sbi, ATGC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) am->atgc_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) am->root = RB_ROOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) INIT_LIST_HEAD(&am->victim_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) am->victim_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
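	/*
	 * Victim-selection knobs (rough sketch): the ATGC victim lookup
	 * scans roughly a candidate_ratio% slice of the victim list, capped
	 * at max_candidate_count entries, and weights segment age against
	 * valid-block cost by age_weight.
	 */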
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) DIRTY_I(sbi)->v_ops = &default_v_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
	/* allocate the warm/cold data area from the slower device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) SIT_I(sbi)->last_victim[ALLOC_NEXT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) init_atgc_management(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static int free_segment_range(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) unsigned int secs, bool gc_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) unsigned int segno, next_inuse, start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) int gc_mode, gc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
	/*
	 * Force block allocation for GC: with MAIN_SECS shrunk, no new
	 * segment can be allocated inside the range being removed.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) MAIN_SECS(sbi) -= secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) start = MAIN_SECS(sbi) * sbi->segs_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) end = MAIN_SEGS(sbi) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
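	/* forget cached victim hints that point into the range being removed */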
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) mutex_lock(&DIRTY_I(sbi)->seglist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (SIT_I(sbi)->last_victim[gc_mode] >= start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) SIT_I(sbi)->last_victim[gc_mode] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (sbi->next_victim_seg[gc_type] >= start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) sbi->next_victim_seg[gc_type] = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
	/* Move cursegs out of the target range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) f2fs_allocate_segment_for_resize(sbi, type, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
	/* do GC to move valid blocks out of the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct gc_inode_list gc_list = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) .ilist = LIST_HEAD_INIT(gc_list.ilist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) put_gc_inode(&gc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (!gc_only && get_valid_blocks(sbi, segno, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (gc_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) err = f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still in use!",
			 next_inuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) f2fs_bug_on(sbi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) MAIN_SECS(sbi) += secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) int section_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) int segment_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) int segment_count_main;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) long long block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int segs = secs * sbi->segs_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
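	/* @secs is negative on shrink, so every count below decreases */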
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) f2fs_down_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) section_count = le32_to_cpu(raw_sb->section_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) segment_count = le32_to_cpu(raw_sb->segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) block_count = le64_to_cpu(raw_sb->block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) raw_sb->section_count = cpu_to_le32(section_count + secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) raw_sb->segment_count = cpu_to_le32(segment_count + segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) raw_sb->block_count = cpu_to_le64(block_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) (long long)segs * sbi->blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (f2fs_is_multi_device(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) int last_dev = sbi->s_ndevs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) int dev_segs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) le32_to_cpu(raw_sb->devs[last_dev].total_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) raw_sb->devs[last_dev].total_segments =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) cpu_to_le32(dev_segs + segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) f2fs_up_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) int segs = secs * sbi->segs_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) long long blks = (long long)segs * sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) long long user_block_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
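	/* as in update_sb_metadata(), @secs and thus blks are negative on shrink */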
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) MAIN_SECS(sbi) += secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (f2fs_is_multi_device(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) int last_dev = sbi->s_ndevs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) FDEV(last_dev).total_segments =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) (int)FDEV(last_dev).total_segments + segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) FDEV(last_dev).end_blk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) (long long)FDEV(last_dev).end_blk + blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) (int)(blks >> sbi->log_blocks_per_blkz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) __u64 old_block_count, shrunk_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) unsigned int secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) __u32 rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
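	/*
	 * only shrinking is supported; e.g. with the default geometry of
	 * 512 blocks per segment and one segment per section, passing
	 * old_block_count - 512 shrinks the fs by exactly one section.
	 */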
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (block_count > old_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
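	/* the shrunk range must fit within the last device; a whole device cannot be removed */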
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (f2fs_is_multi_device(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int last_dev = sbi->s_ndevs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) __u64 last_segs = FDEV(last_dev).total_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (block_count + last_segs * sbi->blocks_per_seg <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) old_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
	/* the new fs size must be aligned to the section size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (rem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (block_count == old_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) f2fs_err(sbi, "Should run fsck to repair first.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (test_opt(sbi, DISABLE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) f2fs_err(sbi, "Checkpoint should be enabled.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) shrunk_blocks = old_block_count - block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /* stop other GC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (!f2fs_down_write_trylock(&sbi->gc_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
	/* stop CP to protect MAIN_SECS in free_segment_range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) spin_lock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (shrunk_blocks + valid_user_blocks(sbi) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) sbi->current_reserved_blocks + sbi->unusable_block_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) spin_unlock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
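	/*
	 * Pass 1: only migrate blocks out of the doomed range (gc_only)
	 * while the filesystem stays live; the final pass below runs with
	 * the filesystem frozen.
	 */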
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) err = free_segment_range(sbi, secs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) f2fs_up_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) set_sbi_flag(sbi, SBI_IS_RESIZEFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
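	/* block user writes while the on-disk geometry is switched over */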
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) freeze_super(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) f2fs_down_write(&sbi->cp_global_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
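	/* re-check the space condition: usage may have grown before the freeze */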
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) spin_lock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (shrunk_blocks + valid_user_blocks(sbi) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) sbi->current_reserved_blocks + sbi->unusable_block_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) sbi->user_block_count -= shrunk_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) spin_unlock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) err = free_segment_range(sbi, secs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) goto recover_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) update_sb_metadata(sbi, -secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) err = f2fs_commit_super(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) update_sb_metadata(sbi, secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) goto recover_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) update_fs_metadata(sbi, -secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) set_sbi_flag(sbi, SBI_IS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) err = f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) update_fs_metadata(sbi, secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) update_sb_metadata(sbi, secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) f2fs_commit_super(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) recover_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) spin_lock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) sbi->user_block_count += shrunk_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) spin_unlock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) f2fs_up_write(&sbi->cp_global_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) f2fs_up_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) thaw_super(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }