// SPDX-License-Identifier: GPL-2.0+
/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files; this leads to segment writes
			 * without creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file.  This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get() are
 * wrappers for the stage count (nilfs_sc_info->sc_stage.scnt).  Users of the
 * variable must go through them because every transition of the stage count
 * must emit a trace event (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't strictly required for that purpose because it
 * doesn't produce tracepoint events; it is provided just to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}
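
/*
 * For illustration only (a sketch, not part of the driver logic): a
 * collection pass is expected to advance the stage through these wrappers,
 * e.g.
 *
 *	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 *	...collect blocks...
 *	nilfs_sc_cstage_inc(sci);
 *
 * rather than assigning to sci->sc_stage.scnt directly, so that every
 * transition is visible to the tracepoint.
 */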

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)
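
/*
 * Illustrative sketch (hypothetical values, not from the driver): these
 * comparisons stay correct when the 32-bit counters wrap around, in the
 * spirit of the kernel's time_after() family.  With
 *
 *	__u32 older = 0xfffffffe, newer = 2;	(newer is 4 increments later)
 *
 * a plain "newer > older" is false, but nilfs_cnt32_gt(newer, older) is
 * true, because (__s32)(older) - (__s32)(newer) == -4 < 0.
 */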

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored in
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given in @ti, it is used
 * instead; otherwise a new struct is allocated from a slab cache.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
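
/*
 * A typical calling pattern, for illustration only (the file operation in
 * the middle is hypothetical):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_some_dirtying_operation(...);
 *	if (unlikely(err))
 *		nilfs_transaction_abort(sb);
 *	else
 *		err = nilfs_transaction_commit(sb);
 *
 * Nested begin/commit pairs only adjust ti_count; the segment semaphore
 * and the journal_info hand-off happen in the outermost pair.
 */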

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;
	sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;
	sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough relative to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
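
/*
 * Worked example (illustrative numbers only): with a 4096-byte block,
 * ssp->offset == 4080, an 8-byte binfo, and sc_blk_cnt == 0 (so a new
 * finfo header must also fit), the test above is
 *
 *	4080 + 8 + sizeof(struct nilfs_finfo) > 4096,
 *
 * which holds because the finfo header (two 64-bit and two 32-bit
 * fields) is larger than the 8 bytes left over, so the caller has to
 * extend the segment summary by one block before mapping the entry.
 */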

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (unlikely(index > last) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) PAGECACHE_TAG_DIRTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return ndirties;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) for (i = 0; i < pagevec_count(&pvec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct buffer_head *bh, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct page *page = pvec.pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!page_has_buffers(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) create_empty_buffers(page, i_blocksize(inode), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (!buffer_dirty(bh) || buffer_async_write(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) list_add_tail(&bh->b_assoc_buffers, listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) ndirties++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (unlikely(ndirties >= nlimit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return ndirties;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) } while (bh = bh->b_this_page, bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct list_head *listp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct address_space *mapping = &ii->i_btnode_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct pagevec pvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct buffer_head *bh, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) pgoff_t index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) pagevec_init(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) while (pagevec_lookup_tag(&pvec, mapping, &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) PAGECACHE_TAG_DIRTY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) for (i = 0; i < pagevec_count(&pvec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) bh = head = page_buffers(pvec.pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (buffer_dirty(bh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) !buffer_async_write(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) list_add_tail(&bh->b_assoc_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) pagevec_release(&pvec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void nilfs_dispose_list(struct the_nilfs *nilfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct list_head *head, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct nilfs_inode_info *ii, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned int nv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) list_for_each_entry_safe(ii, n, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) list_del_init(&ii->i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (unlikely(ii->i_bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) brelse(ii->i_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ii->i_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) set_bit(NILFS_I_QUEUED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) list_add_tail(&ii->i_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) &nilfs->ns_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ivec[nv++] = ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (nv == SC_N_INODEVEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) for (pii = ivec; nv > 0; pii++, nv--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) iput(&(*pii)->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static void nilfs_iput_work_func(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) sc_iput_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
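/*
 * nilfs_test_metadata_dirty - check for dirty metadata files
 *
 * Returns a nonzero count if the ifile, cpfile, or sufile has dirty
 * buffers to flush.  The DAT file is checked only when one of those
 * is dirty or GC is in progress.
 */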
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct nilfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (nilfs_mdt_fetch_dirty(root->ifile))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return list_empty(&sci->sc_dirty_files) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) sci->sc_nfreesegs == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) nilfs_mdt_clear_dirty(sci->sc_root->ifile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) nilfs_mdt_clear_dirty(nilfs->ns_sufile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) nilfs_mdt_clear_dirty(nilfs->ns_dat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
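/*
 * nilfs_segctor_create_checkpoint - get and dirty the current checkpoint
 *
 * Looks up the checkpoint entry for the current checkpoint number,
 * creating it if it does not exist yet, and marks its buffer dirty
 * so that the block is collected into the log under construction.
 */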
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct buffer_head *bh_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct nilfs_checkpoint *raw_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* XXX: this interface will be changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) &raw_cp, &bh_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * The following code duplicates logic in cpfile, but it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * needed to collect the checkpoint even if it was not newly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) mark_buffer_dirty(bh_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) nilfs_cpfile_put_checkpoint(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) WARN_ON(err == -EINVAL || err == -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
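/*
 * nilfs_segctor_fill_in_checkpoint - fill in the fields of the checkpoint
 *
 * Looks up the checkpoint entry made earlier for the current
 * checkpoint number, resets its snapshot list, records the inode and
 * block counts, the number of blocks written, the creation time, the
 * checkpoint number, and the minor-checkpoint flag, and writes the
 * ifile inode into the entry before releasing the buffer.
 */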
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct buffer_head *bh_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct nilfs_checkpoint *raw_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) &raw_cp, &bh_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) WARN_ON(err == -EINVAL || err == -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) goto failed_ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) raw_cp->cp_snapshot_list.ssl_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) raw_cp->cp_snapshot_list.ssl_prev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) raw_cp->cp_inodes_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) raw_cp->cp_blocks_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) raw_cp->cp_nblk_inc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) nilfs_checkpoint_clear_minor(raw_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) nilfs_checkpoint_set_minor(raw_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) nilfs_write_inode_common(sci->sc_root->ifile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) &raw_cp->cp_ifile_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) failed_ibh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
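/*
 * nilfs_fill_in_file_bmap - write the bmap data into the on-disk inode
 *
 * If @ii holds an on-memory bmap (NILFS_I_BMAP), maps its entry in
 * @ifile through the buffer cached in i_bh and writes the bmap data
 * into the raw inode.
 */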
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static void nilfs_fill_in_file_bmap(struct inode *ifile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct nilfs_inode_info *ii)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct buffer_head *ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct nilfs_inode *raw_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ibh = ii->i_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) BUG_ON(!ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) nilfs_bmap_write(ii->i_bmap, raw_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) set_bit(NILFS_I_COLLECTED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
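/*
 * nilfs_segctor_fill_in_super_root - fill in the super root block
 *
 * Writes the super root size, the non-GC creation time, and the
 * on-disk inodes of the DAT, cpfile, and sufile into the super root
 * buffer of the last segment, and zero-fills the rest of the block.
 */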
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct buffer_head *bh_sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct nilfs_super_root *raw_sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) unsigned int isz, srsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) isz = nilfs->ns_inode_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) srsz = NILFS_SR_BYTES(isz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) raw_sr->sr_bytes = cpu_to_le16(srsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) raw_sr->sr_nongc_ctime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) = cpu_to_le64(nilfs_doing_gc() ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) raw_sr->sr_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) NILFS_SR_DAT_OFFSET(isz), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) NILFS_SR_CPFILE_OFFSET(isz), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) NILFS_SR_SUFILE_OFFSET(isz), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static void nilfs_redirty_inodes(struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) list_for_each_entry(ii, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) clear_bit(NILFS_I_COLLECTED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static void nilfs_drop_collected_inodes(struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) list_for_each_entry(ii, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) set_bit(NILFS_I_UPDATED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
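/*
 * nilfs_segctor_apply_buffers - apply a collection method to buffers
 *
 * Runs @collect on each buffer queued on @listp, dropping the
 * reference taken when the buffer was looked up.  If @collect is
 * NULL or returns an error, the remaining buffers are unlinked from
 * @listp and released without being collected.
 */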
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct list_head *listp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int (*collect)(struct nilfs_sc_info *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct buffer_head *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct inode *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct buffer_head *bh, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (collect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) list_del_init(&bh->b_assoc_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) err = collect(sci, bh, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto dispose_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dispose_buffers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) while (!list_empty(listp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) bh = list_first_entry(listp, struct buffer_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) b_assoc_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) list_del_init(&bh->b_assoc_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* Remaining number of blocks within segment buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return sci->sc_segbuf_nblocks -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
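/*
 * nilfs_segctor_scan_file - collect the dirty blocks of a file
 *
 * Collects dirty data buffers first; one buffer more than the room
 * left in the current segment buffer is looked up, so that an
 * overflow shows up as -E2BIG when the buffers are applied.  B-tree
 * node buffers and dirty bmap buffers follow.  The NILFS_CF_NODE
 * flag records that data collection has completed, so a retry after
 * -E2BIG resumes with node collection.
 */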
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) const struct nilfs_sc_operations *sc_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) LIST_HEAD(data_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) LIST_HEAD(node_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) size_t n, rest = nilfs_segctor_buffer_rest(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) n = nilfs_lookup_dirty_data_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) inode, &data_buffers, rest + 1, 0, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (n > rest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = nilfs_segctor_apply_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) sci, inode, &data_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) sc_ops->collect_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) BUG_ON(!err); /* we always get -E2BIG or a real error here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) err = nilfs_segctor_apply_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sci, inode, &data_buffers, sc_ops->collect_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* dispose node list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) nilfs_segctor_apply_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) sci, inode, &node_buffers, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) sci->sc_stage.flags |= NILFS_CF_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* Collect node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) err = nilfs_segctor_apply_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) sci, inode, &node_buffers, sc_ops->collect_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err = nilfs_segctor_apply_buffers(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) sci, inode, &node_buffers, sc_ops->collect_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) nilfs_segctor_end_finfo(sci, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) sci->sc_stage.flags &= ~NILFS_CF_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) break_or_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
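/*
 * nilfs_segctor_scan_file_dsync - collect data blocks for a dsync log
 *
 * Collects only the dirty data buffers of @inode that fall within
 * the requested dsync range; node blocks are not collected in this
 * mode.
 */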
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) LIST_HEAD(data_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) size_t n, rest = nilfs_segctor_buffer_rest(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) sci->sc_dsync_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) sci->sc_dsync_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) nilfs_collect_file_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) nilfs_segctor_end_finfo(sci, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) BUG_ON(n > rest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* we would have gotten -E2BIG or a real error if n > rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
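/*
 * nilfs_segctor_collect_blocks - run the block collection stage machine
 *
 * The switch resumes at the stage saved in sc_stage, and each case
 * falls through to the next so that an interrupted construction can
 * continue where it left off.  Flush modes take shortcuts:
 * SC_FLUSH_FILE finishes after the file stage, SC_FLUSH_DAT jumps
 * directly to the DAT stage, and SC_LSEG_DSYNC branches to the
 * dedicated dsync stage.
 */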
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) size_t ndone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) switch (nilfs_sc_cstage_get(sci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) case NILFS_ST_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Pre-processes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) sci->sc_stage.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) sci->sc_nblk_inc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (mode == SC_LSEG_DSYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) goto dsync_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sci->sc_stage.dirty_file_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sci->sc_stage.gc_inode_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (mode == SC_FLUSH_DAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto dat_stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case NILFS_ST_GC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (nilfs_doing_gc()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) head = &sci->sc_gc_inodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) head, i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) list_for_each_entry_continue(ii, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) err = nilfs_segctor_scan_file(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) sci, &ii->vfs_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) &nilfs_sc_file_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) sci->sc_stage.gc_inode_ptr = list_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ii->i_dirty.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct nilfs_inode_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) set_bit(NILFS_I_COLLECTED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) sci->sc_stage.gc_inode_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case NILFS_ST_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) head = &sci->sc_dirty_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) list_for_each_entry_continue(ii, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) clear_bit(NILFS_I_DIRTY, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) &nilfs_sc_file_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) sci->sc_stage.dirty_file_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) list_entry(ii->i_dirty.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct nilfs_inode_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto break_or_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* XXX: required ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) sci->sc_stage.dirty_file_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (mode == SC_FLUSH_FILE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) case NILFS_ST_IFILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) &nilfs_sc_file_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* Creating a checkpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) err = nilfs_segctor_create_checkpoint(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) case NILFS_ST_CPFILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) &nilfs_sc_file_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) case NILFS_ST_SUFILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) sci->sc_nfreesegs, &ndone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) nilfs_sufile_cancel_freev(nilfs->ns_sufile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) sci->sc_freesegs, ndone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) sci->sc_stage.flags |= NILFS_CF_SUFREED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) &nilfs_sc_file_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case NILFS_ST_DAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) dat_stage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) &nilfs_sc_dat_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (mode == SC_FLUSH_DAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) nilfs_sc_cstage_inc(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case NILFS_ST_SR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (mode == SC_LSEG_SR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* Appending a super root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) err = nilfs_segctor_add_super_root(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* End of a logical segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case NILFS_ST_DSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) dsync_mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ii = sci->sc_dsync_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (!test_bit(NILFS_I_BUSY, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) case NILFS_ST_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) break_or_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * nilfs_segctor_begin_construction - set up segment buffer to make a new log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * @sci: segment constructor object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @nilfs: nilfs object
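*
* Return: 0 on success, or a negative error code on failure.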
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct nilfs_segment_buffer *segbuf, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) __u64 nextnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int err, alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) segbuf = nilfs_segbuf_new(sci->sc_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (unlikely(!segbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (list_empty(&sci->sc_write_logs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) nilfs->ns_pseg_offset, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) nilfs_shift_to_next_segment(nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) nextnum = nilfs->ns_nextnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (nilfs->ns_segnum == nilfs->ns_nextnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Start from the head of a new full segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) alloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Continue logs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) nilfs_segbuf_map_cont(segbuf, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) nextnum = prev->sb_nextnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) segbuf->sb_sum.seg_seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) alloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) BUG_ON(!list_empty(&sci->sc_segbufs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) nilfs_segbuf_free(segbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
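/*
 * nilfs_segctor_extend_segments - append segment buffers for a retry
 *
 * Adds up to @nadd segment buffers, each mapped onto the segment
 * reserved as "next" by its predecessor, and allocates a new next
 * segment in the sufile for each of them.  On failure, the segments
 * allocated so far are returned to the sufile and the new buffers
 * are destroyed.
 */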
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct the_nilfs *nilfs, int nadd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct nilfs_segment_buffer *segbuf, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct inode *sufile = nilfs->ns_sufile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) __u64 nextnextnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int err, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Since the segment specified with nextnum may have been allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * during the previous construction, the buffer containing its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * segusage may not be dirty. The following call ensures that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * buffer is dirty and pins it in memory until the sufile is written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) for (i = 0; i < nadd; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* extend segment info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) segbuf = nilfs_segbuf_new(sci->sc_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (unlikely(!segbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /* map this buffer to region of segment on-disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /* allocate the full segment that follows the next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) err = nilfs_sufile_alloc(sufile, &nextnextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) goto failed_segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) list_add_tail(&segbuf->sb_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) prev = segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) list_splice_tail(&list, &sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) failed_segbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) nilfs_segbuf_free(segbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) list_for_each_entry(segbuf, &list, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) WARN_ON(ret); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) nilfs_destroy_logs(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
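/*
 * nilfs_free_incomplete_logs - clean up after a failed log write
 *
 * Returns newly allocated next-segment numbers of @logs to the
 * sufile.  If the head segment failed, either terminates the partial
 * segment chain of the underlying full segment or marks the nilfs
 * object discontinued; later failed segments other than the next
 * segment are flagged with an error in the sufile.
 */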
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static void nilfs_free_incomplete_logs(struct list_head *logs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct nilfs_segment_buffer *segbuf, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct inode *sufile = nilfs->ns_sufile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) segbuf = NILFS_FIRST_SEGBUF(logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) WARN_ON(ret); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (atomic_read(&segbuf->sb_err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* Case 1: The first segment failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * Case 1a: Partial segment appended into an existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) segbuf->sb_fseg_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) else /* Case 1b: New full segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) set_nilfs_discontinued(nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) prev = segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) list_for_each_entry_continue(segbuf, logs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (prev->sb_nextnum != segbuf->sb_nextnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) WARN_ON(ret); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (atomic_read(&segbuf->sb_err) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) segbuf->sb_segnum != nilfs->ns_nextnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /* Case 2: extended segment (!= next) failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) prev = segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct inode *sufile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) unsigned long live_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) live_blocks = segbuf->sb_sum.nblocks +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) live_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) sci->sc_seg_ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) WARN_ON(ret); /* always succeeds because the segusage is dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) segbuf = NILFS_FIRST_SEGBUF(logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) segbuf->sb_pseg_start -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) segbuf->sb_fseg_start, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) WARN_ON(ret); /* always succeeds because the segusage is dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) list_for_each_entry_continue(segbuf, logs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) WARN_ON(ret); /* always succeeds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct nilfs_segment_buffer *last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct inode *sufile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct nilfs_segment_buffer *segbuf = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) nilfs_truncate_logs(&sci->sc_segbufs, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
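/*
 * nilfs_segctor_collect - collect blocks, extending segments on demand
 *
 * Calls nilfs_segctor_collect_blocks() and, when a super root
 * construction (SC_LSEG_SR) overflows with -E2BIG at or beyond the
 * cpfile stage, clears the partly filled logs, cancels any segment
 * frees made in the sufile stage, appends fresh segment buffers
 * (doubling their number up to SC_MAX_SEGDELTA), restores the stage
 * saved on entry, and retries.  Surplus segment buffers are
 * truncated once collection succeeds.
 */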
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct the_nilfs *nilfs, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct nilfs_cstage prev_stage = sci->sc_stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) int err, nadd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* Collection retry loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) sci->sc_nblk_this_inc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) err = nilfs_segctor_reset_segment_buffer(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) err = nilfs_segctor_collect_blocks(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (unlikely(err != -E2BIG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* The current segment is filled up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (mode != SC_LSEG_SR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) nilfs_clear_logs(&sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) sci->sc_freesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) sci->sc_nfreesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) WARN_ON(err); /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) sci->sc_stage = prev_stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct buffer_head *new_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* The caller must release old_bh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
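/*
 * nilfs_segctor_update_payload_blocknr - assign disk addresses to payload
 *
 * Walks the payload buffers of @segbuf in step with the finfo
 * entries in the segment summary, lets the owning file's bmap assign
 * each buffer its disk block number, and writes the matching binfo
 * entry, choosing the data or node variant of the operations set
 * selected per file (dsync, DAT, or regular).  The walk stops at the
 * super root buffer, which needs no binfo.
 */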
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) struct nilfs_segment_buffer *segbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) sector_t blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) unsigned long nfinfo = segbuf->sb_sum.nfinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) unsigned long nblocks = 0, ndatablk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) const struct nilfs_sc_operations *sc_op = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct nilfs_segsum_pointer ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct nilfs_finfo *finfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) union nilfs_binfo binfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct buffer_head *bh, *bh_org;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ino_t ino = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (!nfinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ssp.offset = sizeof(struct nilfs_segment_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (bh == segbuf->sb_super_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (!finfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) finfo = nilfs_segctor_map_segsum_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) sci, &ssp, sizeof(*finfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ino = le64_to_cpu(finfo->fi_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) nblocks = le32_to_cpu(finfo->fi_nblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ndatablk = le32_to_cpu(finfo->fi_ndatablk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) inode = bh->b_page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (mode == SC_LSEG_DSYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) sc_op = &nilfs_sc_dsync_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) else if (ino == NILFS_DAT_INO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) sc_op = &nilfs_sc_dat_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) else /* file blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) sc_op = &nilfs_sc_file_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) bh_org = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) get_bh(bh_org);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) &binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (bh != bh_org)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) nilfs_list_replace_buffer(bh_org, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) brelse(bh_org);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) goto failed_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (ndatablk > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) sc_op->write_data_binfo(sci, &ssp, &binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) sc_op->write_node_binfo(sci, &ssp, &binfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) blocknr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (--nblocks == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) finfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (--nfinfo == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) } else if (ndatablk > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ndatablk--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) failed_bmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
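
/*
 * A compact illustration of the counter walk above: each finfo entry
 * covers fi_nblocks buffers of one file, of which the first
 * fi_ndatablk are data blocks and the remainder are b-tree node
 * blocks.  The following extractable userspace sketch replays that
 * classification with made-up numbers (demo-only, not built).
 */
#if 0
#include <stdio.h>

struct demo_finfo { unsigned long nblocks, ndatablk; };

int main(void)
{
	static const struct demo_finfo fis[] = { { 3, 2 }, { 2, 0 } };
	unsigned long nfinfo = 2, nblocks = 0, ndatablk = 0;
	const struct demo_finfo *fi = NULL;
	unsigned long blocknr = 100;	/* arbitrary start block */

	for (;;) {
		if (!fi) {	/* start of the next file's block run */
			fi = &fis[2 - nfinfo];
			nblocks = fi->nblocks;
			ndatablk = fi->ndatablk;
		}
		printf("block %lu: %s\n", blocknr,
		       ndatablk > 0 ? "data" : "node");
		blocknr++;
		if (--nblocks == 0) {
			fi = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0) {
			ndatablk--;
		}
	}
	return 0;
}
#endif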
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) nilfs_segbuf_fill_in_segsum(segbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void nilfs_begin_page_io(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (!page || PageWriteback(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * For split b-tree node pages, this function may be called more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * than once; the second and later calls are skipped by this check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) clear_page_dirty_for_io(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct page *bd_page = NULL, *fs_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) lock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) clear_page_dirty_for_io(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) set_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) unlock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) list_for_each_entry(bh, &segbuf->sb_payload_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) set_buffer_async_write(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (bh == segbuf->sb_super_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) lock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) clear_page_dirty_for_io(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) set_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) unlock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (bh->b_page != fs_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) nilfs_begin_page_io(fs_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) fs_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) lock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) clear_page_dirty_for_io(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) set_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) unlock_page(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) nilfs_begin_page_io(fs_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
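
/*
 * The loop above detects page boundaries by comparing bh->b_page with
 * the page seen last; buffers sharing a page are adjacent in the list,
 * and the final page must be finished after the loop.  An extractable
 * userspace sketch of that run detection (made-up page ids, not built):
 */
#if 0
#include <stdio.h>

int main(void)
{
	static const int page_of_bh[] = { 1, 1, 2, 2, 2, 3 };
	int i, cur = -1;

	for (i = 0; i < 6; i++) {
		if (page_of_bh[i] != cur) {
			if (cur != -1)
				printf("finish page %d\n", cur);
			cur = page_of_bh[i];
			printf("start page %d\n", cur);
		}
	}
	if (cur != -1)	/* the last run ends outside the loop */
		printf("finish page %d\n", cur);
	return 0;
}
#endif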
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) static int nilfs_segctor_write(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) static void nilfs_end_page_io(struct page *page, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * For b-tree node pages, this function may be called more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * once because such pages can be split across segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * For pages holding split b-tree node buffers, the dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * flag on individual buffers may be cleared separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * In that case, the page is redirtied once for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * remaining buffers, and that redirtying must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * cancelled if all the buffers are cleaned later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (nilfs_page_buffers_clean(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) __nilfs_clear_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (!nilfs_page_buffers_clean(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static void nilfs_abort_logs(struct list_head *logs, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct page *bd_page = NULL, *fs_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (list_empty(logs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) list_for_each_entry(segbuf, logs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (bd_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) list_for_each_entry(bh, &segbuf->sb_payload_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) clear_buffer_async_write(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (bh == segbuf->sb_super_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (bh->b_page != fs_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) nilfs_end_page_io(fs_page, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) fs_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (bd_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) nilfs_end_page_io(fs_page, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct the_nilfs *nilfs, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) LIST_HEAD(logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) list_splice_tail_init(&sci->sc_write_logs, &logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) ret = nilfs_wait_on_logs(&logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) nilfs_abort_logs(&logs, ret ? : err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) list_splice_tail_init(&sci->sc_segbufs, &logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) nilfs_free_incomplete_logs(&logs, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) sci->sc_freesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) sci->sc_nfreesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) WARN_ON(ret); /* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) nilfs_destroy_logs(&logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static void nilfs_set_next_segment(struct the_nilfs *nilfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct nilfs_segment_buffer *segbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) nilfs->ns_segnum = segbuf->sb_segnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) nilfs->ns_nextnum = segbuf->sb_nextnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) + segbuf->sb_sum.nblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) nilfs->ns_ctime = segbuf->sb_sum.ctime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
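
/*
 * Worked example of the ns_pseg_offset computation above, with
 * made-up block numbers (extractable userspace sketch, not built):
 * a full segment starting at block 1000 whose latest partial segment
 * starts at block 1040 and holds 8 blocks leaves the next write
 * offset at (1040 - 1000) + 8 = 48 blocks into the segment.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long fseg_start = 1000, pseg_start = 1040, nblocks = 8;

	printf("next offset = %lu\n", pseg_start - fseg_start + nblocks);
	return 0;
}
#endif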
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) struct nilfs_segment_buffer *segbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct page *bd_page = NULL, *fs_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) int update_sr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (bd_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * We assume that buffers belonging to the same page are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * contiguous in the buffer list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * Under this assumption, the last buffer head of each page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * is identifiable by a change of bh->b_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * (page != fs_page).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * For B-tree node blocks, however, this assumption is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * guaranteed. The cleanup code of B-tree node pages needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * special care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) list_for_each_entry(bh, &segbuf->sb_payload_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) b_assoc_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) const unsigned long set_bits = BIT(BH_Uptodate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) const unsigned long clear_bits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) (BIT(BH_Dirty) | BIT(BH_Async_Write) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) BIT(BH_NILFS_Redirected));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) set_mask_bits(&bh->b_state, clear_bits, set_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (bh == segbuf->sb_super_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (bh->b_page != bd_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) bd_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) update_sr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (bh->b_page != fs_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) nilfs_end_page_io(fs_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) fs_page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (!nilfs_segbuf_simplex(segbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) sci->sc_lseg_stime = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * Since pages may span multiple segment buffers, the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * of the last page must be handled outside the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (bd_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) end_page_writeback(bd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) nilfs_end_page_io(fs_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) nilfs_drop_collected_inodes(&sci->sc_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (nilfs_doing_gc())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sci->sc_nblk_inc += sci->sc_nblk_this_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) nilfs_set_next_segment(nilfs, segbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (update_sr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) nilfs->ns_flushed_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) nilfs_segctor_clear_metadata_dirty(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
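
/*
 * The payload loop above uses set_mask_bits() to clear one group of
 * buffer-state bits and set another in a single atomic update.  A
 * minimal, extractable userspace sketch of that idiom follows; C11
 * atomics stand in for the kernel's cmpxchg loop (demo-only, not
 * built).
 */
#if 0
#include <stdio.h>
#include <stdatomic.h>

static unsigned long demo_set_mask_bits(_Atomic unsigned long *p,
					unsigned long clear,
					unsigned long set)
{
	unsigned long old = atomic_load(p), new;

	do {
		new = (old & ~clear) | set;
	} while (!atomic_compare_exchange_weak(p, &old, new));
	return old;	/* value before the update */
}

int main(void)
{
	_Atomic unsigned long state = 0x0f;

	demo_set_mask_bits(&state, 0x06 /* clear */, 0x10 /* set */);
	/* (0x0f & ~0x06) | 0x10 == 0x19 */
	printf("state = 0x%lx\n", (unsigned long)atomic_load(&state));
	return 0;
}
#endif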
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ret = nilfs_wait_on_logs(&sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) nilfs_segctor_complete_write(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) nilfs_destroy_logs(&sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct nilfs_inode_info *ii, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct inode *ifile = sci->sc_root->ifile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (!ii->i_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) struct buffer_head *ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) err = nilfs_ifile_get_inode_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ifile, ii->vfs_inode.i_ino, &ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) nilfs_warn(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) "log writer: error %d getting inode block (ino=%lu)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) err, ii->vfs_inode.i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (likely(!ii->i_bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) ii->i_bh = ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) brelse(ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /* Always redirty the buffer to avoid a race condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) mark_buffer_dirty(ii->i_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) nilfs_mdt_mark_dirty(ifile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) clear_bit(NILFS_I_QUEUED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) set_bit(NILFS_I_BUSY, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
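
/*
 * The retry above is the classic "drop the lock, do blocking work,
 * retake the lock, recheck" pattern: another task may have installed
 * ii->i_bh while the lock was released.  An extractable userspace
 * sketch of the same pattern using a pthread mutex (demo-only names,
 * error handling omitted, not built):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;	/* stands in for ii->i_bh */

static void fill_slot(void)
{
	int *p;

	pthread_mutex_lock(&lock);
	while (!slot) {
		pthread_mutex_unlock(&lock);
		p = malloc(sizeof(*p));	/* blocking work, lock dropped */
		*p = 42;
		pthread_mutex_lock(&lock);
		if (!slot)
			slot = p;	/* we won the race */
		else
			free(p);	/* somebody else filled it; drop ours */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	fill_slot();
	printf("slot = %d\n", *slot);
	free(slot);
	return 0;
}
#endif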
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct the_nilfs *nilfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) struct nilfs_inode_info *ii, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) int defer_iput = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) test_bit(NILFS_I_DIRTY, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) clear_bit(NILFS_I_BUSY, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) brelse(ii->i_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) ii->i_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) list_del_init(&ii->i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (!ii->vfs_inode.i_nlink || during_mount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * Defer calling iput() to avoid deadlocks if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * i_nlink == 0 or mount is not yet finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) defer_iput = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) iput(&ii->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (defer_iput)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) schedule_work(&sci->sc_iput_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
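
/*
 * A sketch of the deferral above: objects that cannot be released
 * safely in the current locking context are parked on a queue and
 * released later from worker context.  Extractable userspace
 * simplification with a fixed-size queue (demo-only, not built):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void *release_queue[8];
static int release_count;

static void defer_release(void *obj)
{
	release_queue[release_count++] = obj;	/* cheap while "locked" */
}

static void release_worker(void)	/* runs later, no locks held */
{
	while (release_count > 0)
		free(release_queue[--release_count]);
}

int main(void)
{
	defer_release(malloc(16));
	defer_release(malloc(16));
	release_worker();
	printf("deferred objects released\n");
	return 0;
}
#endif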
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * Main procedure of segment constructor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) sci->sc_cno = nilfs->ns_cno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) err = nilfs_segctor_collect_dirty_files(sci, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (nilfs_segctor_clean(sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) err = nilfs_segctor_begin_construction(sci, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) /* Update time stamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) sci->sc_seg_ctime = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) err = nilfs_segctor_collect(sci, nilfs, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /* Avoid empty segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) nilfs_segbuf_empty(sci->sc_curseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) nilfs_segctor_abort_construction(sci, nilfs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) err = nilfs_segctor_assign(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) nilfs_segctor_fill_in_file_bmap(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (mode == SC_LSEG_SR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) err = nilfs_segctor_fill_in_checkpoint(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) goto failed_to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) nilfs_segctor_fill_in_super_root(sci, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Write partial segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) nilfs_segctor_prepare_write(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) nilfs_add_checksums_on_logs(&sci->sc_segbufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) nilfs->ns_crc_seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) err = nilfs_segctor_write(sci, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) goto failed_to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) nilfs->ns_blocksize_bits != PAGE_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * At this point, we avoid double buffering for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * blocksize < pagesize because the page dirty flag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * turned off during the write and dirty buffers are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * properly collected for pages crossing segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) err = nilfs_segctor_wait(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) goto failed_to_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) nilfs_segctor_drop_written_files(sci, nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) failed_to_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) nilfs_redirty_inodes(&sci->sc_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (nilfs_doing_gc())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) nilfs_redirty_inodes(&sci->sc_gc_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) nilfs_segctor_abort_construction(sci, nilfs, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) * nilfs_segctor_start_timer - set timer of background write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * @sci: nilfs_sc_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) * If the timer has already been set, this function ignores the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) * request.  It MUST be called while holding the segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * semaphore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sci->sc_timer.expires = jiffies + sci->sc_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) add_timer(&sci->sc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) sci->sc_state |= NILFS_SEGCTOR_COMMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (!(sci->sc_flush_request & BIT(bn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) unsigned long prev_req = sci->sc_flush_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) sci->sc_flush_request |= BIT(bn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (!prev_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) wake_up(&sci->sc_wait_daemon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * nilfs_flush_segment - trigger a segment construction for resource control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * @ino: inode number of the file to be flushed out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) void nilfs_flush_segment(struct super_block *sb, ino_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct nilfs_sc_info *sci = nilfs->ns_writer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (!sci || nilfs_doing_construction())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* assign bit 0 to data files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
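
/*
 * The bit encoding used above: metadata files, whose inode numbers
 * are small and fixed, each get their own bit in sc_flush_request,
 * while all ordinary data files share bit 0.  Extractable userspace
 * sketch with a made-up metadata cut-off (demo-only, not built):
 */
#if 0
#include <stdio.h>

#define DEMO_MDT_MAX	10	/* pretend inos below 10 are metadata */

static unsigned long flush_request;

static void demo_do_flush(unsigned long ino)
{
	int bn = ino < DEMO_MDT_MAX ? (int)ino : 0;

	flush_request |= 1UL << bn;
}

int main(void)
{
	demo_do_flush(3);	/* metadata file: bit 3 */
	demo_do_flush(1234);	/* data file: bit 0 */
	printf("flush_request = 0x%lx\n", flush_request);	/* 0x9 */
	return 0;
}
#endif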
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct nilfs_segctor_wait_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) wait_queue_entry_t wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) __u32 seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) atomic_t done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) struct nilfs_segctor_wait_request wait_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) init_wait(&wait_req.wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) wait_req.err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) atomic_set(&wait_req.done, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) wait_req.seq = ++sci->sc_seq_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) init_waitqueue_entry(&wait_req.wq, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) wake_up(&sci->sc_wait_daemon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (atomic_read(&wait_req.done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) err = wait_req.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (!signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) finish_wait(&sci->sc_wait_request, &wait_req.wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
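
/*
 * The function above implements a request/acknowledge handshake: the
 * caller takes a sequence number, wakes the log writer, and sleeps
 * until a completion covering that sequence arrives.  The wraparound-
 * safe comparison matters because the counters are free-running.  An
 * extractable userspace sketch with condition variables standing in
 * for the wait queue (demo-only, not built):
 */
#if 0
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int seq_request, seq_done;

static void *writer_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	seq_done = seq_request;	/* "write the logs", then acknowledge */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	unsigned int my_seq;

	pthread_mutex_lock(&lock);
	my_seq = ++seq_request;
	pthread_mutex_unlock(&lock);

	pthread_create(&tid, NULL, writer_thread, NULL);

	pthread_mutex_lock(&lock);
	while ((int)(seq_done - my_seq) < 0)	/* wraparound-safe "done < seq" */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	printf("request %u completed\n", my_seq);
	return 0;
}
#endif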
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct nilfs_segctor_wait_request *wrq, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!atomic_read(&wrq->done) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) wrq->err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) atomic_set(&wrq->done, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (atomic_read(&wrq->done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) wrq->wq.func(&wrq->wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * nilfs_construct_segment - construct a logical segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * Return Value: On success, 0 is returned.  On errors, one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * following negative error codes is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * %-EROFS - Read only filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * %-EIO - I/O error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * %-ENOSPC - No space left on device (only in a panic state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * %-ERESTARTSYS - Interrupted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * %-ENOMEM - Insufficient memory available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) int nilfs_construct_segment(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct nilfs_sc_info *sci = nilfs->ns_writer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct nilfs_transaction_info *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (!sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /* A call inside transactions causes a deadlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) err = nilfs_segctor_sync(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * nilfs_construct_dsync_segment - construct a data-only logical segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * @inode: inode whose data blocks should be written out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * @start: start byte offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * @end: end byte offset (inclusive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * Return Value: On success, 0 is returned.  On errors, one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * following negative error codes is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * %-EROFS - Read only filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * %-EIO - I/O error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * %-ENOSPC - No space left on device (only in a panic state).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * %-ERESTARTSYS - Interrupted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * %-ENOMEM - Insufficient memory available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) loff_t start, loff_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) struct nilfs_sc_info *sci = nilfs->ns_writer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (!sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) nilfs_transaction_lock(sb, &ti, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) nilfs_test_opt(nilfs, STRICT_ORDER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) nilfs_discontinued(nilfs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) nilfs_transaction_unlock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) err = nilfs_segctor_sync(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) !test_bit(NILFS_I_BUSY, &ii->i_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) nilfs_transaction_unlock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) sci->sc_dsync_inode = ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) sci->sc_dsync_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) sci->sc_dsync_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) nilfs->ns_flushed_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) nilfs_transaction_unlock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) #define FLUSH_FILE_BIT (0x1) /* data file only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) * nilfs_segctor_accept - record accepted sequence count of log-write requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * @sci: segment constructor object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) sci->sc_seq_accepted = sci->sc_seq_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) del_timer_sync(&sci->sc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * nilfs_segctor_notify - notify the result of request to caller threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * @sci: segment constructor object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * @mode: mode of log forming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) * @err: error code to be notified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) /* Clear requests (even when the construction failed) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (mode == SC_LSEG_SR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) sci->sc_seq_done = sci->sc_seq_accepted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) nilfs_segctor_wakeup(sci, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) sci->sc_flush_request = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (mode == SC_FLUSH_FILE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) sci->sc_flush_request &= ~FLUSH_FILE_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) else if (mode == SC_FLUSH_DAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) sci->sc_flush_request &= ~FLUSH_DAT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) /* re-enable timer if checkpoint creation was not done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) time_before(jiffies, sci->sc_timer.expires))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) add_timer(&sci->sc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) * nilfs_segctor_construct - form logs and write them to disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * @sci: segment constructor object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) * @mode: mode of log forming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct nilfs_super_block **sbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) nilfs_segctor_accept(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (nilfs_discontinued(nilfs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) mode = SC_LSEG_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (!nilfs_segctor_confirm(sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) err = nilfs_segctor_do_construct(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) if (mode != SC_FLUSH_DAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) atomic_set(&nilfs->ns_ndirtyblks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) nilfs_discontinued(nilfs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) down_write(&nilfs->ns_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) sbp = nilfs_prepare_super(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) nilfs_sb_will_flip(nilfs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (likely(sbp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) nilfs_set_log_cursor(sbp[0], nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) err = nilfs_commit_super(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) NILFS_SB_COMMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) up_write(&nilfs->ns_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) nilfs_segctor_notify(sci, mode, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
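/**
 * nilfs_construction_timeout - timer callback of the segment constructor
 * @t: timer embedded in the nilfs_sc_info struct
 *
 * This merely wakes up the segctord thread; in its main loop the thread
 * decides whether the timeout warrants a construction.
 */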
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) static void nilfs_construction_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) wake_up_process(sci->sc_timer_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
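/**
 * nilfs_remove_written_gcinodes - dispose of GC inodes that were written out
 * @nilfs: nilfs object
 * @head: list of inodes collected for garbage collection
 *
 * Inodes whose blocks have been written out (NILFS_I_UPDATED set) are
 * removed from the list, have their page and btnode caches cleared, and
 * are released with iput(); the others are left on the list.
 */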
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) struct nilfs_inode_info *ii, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) list_for_each_entry_safe(ii, n, head, i_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) list_del_init(&ii->i_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) truncate_inode_pages(&ii->vfs_inode.i_data, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) nilfs_btnode_cache_clear(&ii->i_btnode_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) iput(&ii->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
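/**
 * nilfs_clean_segments - write out logs to relocate blocks selected by GC
 * @sb: super block instance
 * @argv: vectors of arguments describing the garbage collection payload
 * @kbufs: array of kernel buffers holding that payload; as used below,
 *	kbufs[4] carries the numbers of the segments to be freed
 *
 * This is the backend of the garbage collection ioctl: it saves the DAT
 * to its shadow map, prepares the blocks to be moved, then repeats a
 * full segment construction, sleeping for the construction interval
 * after each failure, until one succeeds. Freed segments are discarded
 * afterwards if the DISCARD mount option is set.
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned (%-EROFS if no log writer is attached).
 */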
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) void **kbufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) struct nilfs_sc_info *sci = nilfs->ns_writer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) if (unlikely(!sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) nilfs_transaction_lock(sb, &ti, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) sci->sc_freesegs = kbufs[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) sci->sc_nfreesegs = argv[4].v_nmembs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) err = nilfs_segctor_construct(sci, SC_LSEG_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (likely(!err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) nilfs_warn(sb, "error %d cleaning segments", err);
schedule_timeout_interruptible(sci->sc_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (nilfs_test_opt(nilfs, DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) sci->sc_nfreesegs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) nilfs_warn(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) "error %d on discard request, turning discards off for the device",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) nilfs_clear_opt(nilfs, DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) sci->sc_freesegs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) sci->sc_nfreesegs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) nilfs_transaction_unlock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
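/**
 * nilfs_segctor_thread_construct - construct logs from the daemon context
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * This wraps nilfs_segctor_construct() in a transaction lock and, if the
 * resulting logical segment is left unclosed, starts sc_timer so that a
 * later complete construction closes it.
 */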
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) nilfs_transaction_lock(sci->sc_super, &ti, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) nilfs_segctor_construct(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) /*
* An unclosed segment should be retried.  We do this using sc_timer:
* its timeout invokes a complete construction, which closes the
* current logical segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) nilfs_segctor_start_timer(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) nilfs_transaction_unlock(sci->sc_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
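/**
 * nilfs_segctor_do_immediate_flush - serve a prioritized flush request
 * @sci: segment constructor object
 *
 * This picks the DAT flush over the file flush when both are requested,
 * constructs the corresponding logs, clears the served request bit, and
 * finally clears the NILFS_SC_PRIOR_FLUSH flag.
 */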
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) SC_FLUSH_DAT : SC_FLUSH_FILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) nilfs_segctor_do_construct(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
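/**
 * nilfs_segctor_flush_mode - choose a construction mode for flush requests
 * @sci: segment constructor object
 *
 * A lightweight SC_FLUSH_FILE or SC_FLUSH_DAT construction is chosen if
 * only that kind of request is pending and either no segment is left
 * unclosed or the current one is younger than the major checkpoint
 * frequency; otherwise this falls back to a full construction with a
 * super root (SC_LSEG_SR).
 */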
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) return SC_FLUSH_FILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return SC_FLUSH_DAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) return SC_LSEG_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * nilfs_segctor_thread - main loop of the segment constructor thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * @arg: pointer to a struct nilfs_sc_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) *
* nilfs_segctor_thread() registers itself as the wake-up target of the
* construction timer and serves as a daemon that carries out segment
* constructions on request or on timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static int nilfs_segctor_thread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
struct nilfs_sc_info *sci = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) sci->sc_timer_task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
/* start sync: publish sc_task to signal that the thread has started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) sci->sc_task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) nilfs_info(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (sci->sc_state & NILFS_SEGCTOR_QUIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) goto end_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (timeout || sci->sc_seq_request != sci->sc_seq_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) mode = SC_LSEG_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) else if (sci->sc_flush_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) mode = nilfs_segctor_flush_mode(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) nilfs_segctor_thread_construct(sci, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (freezing(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) try_to_freeze();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) int should_sleep = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) prepare_to_wait(&sci->sc_wait_daemon, &wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) TASK_INTERRUPTIBLE);
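/*
 * Stay awake if a construction or flush request arrived in the
 * meantime; with a checkpoint pending (NILFS_SEGCTOR_COMMIT),
 * sleep only until sc_timer expires.
 */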
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (sci->sc_seq_request != sci->sc_seq_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) should_sleep = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) else if (sci->sc_flush_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) should_sleep = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) should_sleep = time_before(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) sci->sc_timer.expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (should_sleep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) finish_wait(&sci->sc_wait_daemon, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) time_after_eq(jiffies, sci->sc_timer.expires));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) set_nilfs_discontinued(nilfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) goto loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) end_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
/* end sync: clear sc_task to signal that the thread has exited */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) sci->sc_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
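/**
 * nilfs_segctor_start_thread - create and start the segctord thread
 * @sci: segment constructor object
 *
 * Return Value: On success, 0 is returned after the new thread has
 * announced itself through sci->sc_task. On error, the negative error
 * code from kthread_run() is returned.
 */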
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) t = kthread_run(nilfs_segctor_thread, sci, "segctord");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (IS_ERR(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) int err = PTR_ERR(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) nilfs_err(sci->sc_super, "error %d creating segctord thread",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) wait_event(sci->sc_wait_task, sci->sc_task != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
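/**
 * nilfs_segctor_kill_thread - request termination of the segctord thread
 * @sci: segment constructor object
 *
 * The caller must hold sc_state_lock; as the annotations below indicate,
 * the lock is dropped and re-acquired while waiting for the thread to
 * exit.
 */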
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) __acquires(&sci->sc_state_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) __releases(&sci->sc_state_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) sci->sc_state |= NILFS_SEGCTOR_QUIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) while (sci->sc_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) wake_up(&sci->sc_wait_daemon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) wait_event(sci->sc_wait_task, sci->sc_task == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * Setup & clean-up functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) */
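/**
 * nilfs_segctor_new - allocate and initialize a segment constructor
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This takes a reference on @root and fills in the default construction
 * interval and watermark, which mount-time values stored in the nilfs
 * object may override.
 *
 * Return Value: a pointer to the new nilfs_sc_info struct, or NULL on
 * allocation failure.
 */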
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) struct nilfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) struct nilfs_sc_info *sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) sci = kzalloc(sizeof(*sci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (!sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) sci->sc_super = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) nilfs_get_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) sci->sc_root = root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) init_waitqueue_head(&sci->sc_wait_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) init_waitqueue_head(&sci->sc_wait_daemon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) init_waitqueue_head(&sci->sc_wait_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) spin_lock_init(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) INIT_LIST_HEAD(&sci->sc_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) INIT_LIST_HEAD(&sci->sc_segbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) INIT_LIST_HEAD(&sci->sc_write_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) INIT_LIST_HEAD(&sci->sc_gc_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) INIT_LIST_HEAD(&sci->sc_iput_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (nilfs->ns_interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) sci->sc_interval = HZ * nilfs->ns_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (nilfs->ns_watermark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) sci->sc_watermark = nilfs->ns_watermark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
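/**
 * nilfs_segctor_write_out - flush out remaining work before destruction
 * @sci: segment constructor object
 *
 * This repeats a full construction, flushing the iput work queue in
 * between, until the construction succeeds or the retry count runs out.
 */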
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
/*
 * The segctord thread was stopped and its timer was removed,
 * but some tasks may remain; flush them out with a final
 * construction, retrying a bounded number of times on failure.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) nilfs_transaction_lock(sci->sc_super, &ti, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) nilfs_transaction_unlock(sci->sc_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) flush_work(&sci->sc_iput_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) } while (ret && retrycount-- > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * nilfs_segctor_destroy - destroy the segment constructor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * @sci: nilfs_sc_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * nilfs_segctor_destroy() kills the segctord thread and frees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * the nilfs_sc_info struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * Caller must hold the segment semaphore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
bool flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) up_write(&nilfs->ns_segctor_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) spin_lock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) nilfs_segctor_kill_thread(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) || sci->sc_seq_request != sci->sc_seq_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) spin_unlock(&sci->sc_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (flush_work(&sci->sc_iput_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) flag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (flag || !nilfs_segctor_confirm(sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) nilfs_segctor_write_out(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (!list_empty(&sci->sc_dirty_files)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) nilfs_warn(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) "disposed unprocessed dirty file(s) when stopping log writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (!list_empty(&sci->sc_iput_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) nilfs_warn(sci->sc_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) "disposed unprocessed inode(s) in iput queue when stopping log writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) WARN_ON(!list_empty(&sci->sc_segbufs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) WARN_ON(!list_empty(&sci->sc_write_logs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) nilfs_put_root(sci->sc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) down_write(&nilfs->ns_segctor_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) del_timer_sync(&sci->sc_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) kfree(sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * nilfs_attach_log_writer - attach log writer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * @sb: super block instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) * @root: root object of the current filesystem tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * This allocates a log writer object, initializes it, and starts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * log writer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) *
* Return Value: On success, 0 is returned. On error, one of the following
* negative error codes is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * %-ENOMEM - Insufficient memory available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (nilfs->ns_writer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) * This happens if the filesystem was remounted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) * read/write after nilfs_error degenerated it into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) * read-only mount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) nilfs_detach_log_writer(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) nilfs->ns_writer = nilfs_segctor_new(sb, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (!nilfs->ns_writer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) err = nilfs_segctor_start_thread(nilfs->ns_writer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) kfree(nilfs->ns_writer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) nilfs->ns_writer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * nilfs_detach_log_writer - destroy log writer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * @sb: super block instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) *
* This kills the log writer daemon, frees the log writer object, and
* disposes of the remaining list of dirty files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) void nilfs_detach_log_writer(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) LIST_HEAD(garbage_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) down_write(&nilfs->ns_segctor_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (nilfs->ns_writer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) nilfs_segctor_destroy(nilfs->ns_writer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) nilfs->ns_writer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
/* Forcibly free the remaining list of dirty files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (!list_empty(&nilfs->ns_dirty_files)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) nilfs_warn(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) "disposed unprocessed dirty file(s) when detaching log writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) up_write(&nilfs->ns_segctor_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) nilfs_dispose_list(nilfs, &garbage_list, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }