^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/fs/ext4/resize.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Support for resizing an ext4 filesystem while it is mounted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This could probably be made into a module, because it is not often in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define EXT4FS_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "ext4_jbd2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/*
 * Pairs a pointer with an rcu_head so the pointed-to buffer can be
 * freed after an RCU grace period (see ext4_kvfree_array_rcu()).
 */
struct ext4_rcu_ptr {
	struct rcu_head rcu;	/* queued via call_rcu() */
	void *ptr;		/* buffer handed to kvfree() in the callback */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static void ext4_rcu_ptr_callback(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) struct ext4_rcu_ptr *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) ptr = container_of(head, struct ext4_rcu_ptr, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) kvfree(ptr->ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) void ext4_kvfree_array_rcu(void *to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) ptr->ptr = to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) kvfree(to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) int ext4_resize_begin(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) if (!capable(CAP_SYS_RESOURCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * If we are not using the primary superblock/GDT copy don't resize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * because the user tools have no way of handling this. Probably a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * bad time to do it anyways.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ext4_warning(sb, "won't resize using backup superblock at %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * We are not allowed to do online-resizing on a filesystem mounted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * with error, because it can destroy the filesystem easily.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) ext4_warning(sb, "There are errors in the filesystem, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) "so online resizing is not allowed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) if (ext4_has_feature_sparse_super2(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) &EXT4_SB(sb)->s_ext4_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
/*
 * Finish an online resize: drop the RESIZING bit taken in
 * ext4_resize_begin() so another resize may proceed.
 */
void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	/* Order the flag clear before subsequent memory accesses. */
	smp_mb__after_atomic();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) ext4_group_t group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) EXT4_DESC_PER_BLOCK_BITS(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) ext4_group_t group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) group = ext4_meta_bg_first_group(sb, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) return ext4_group_first_block_no(sb, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) ext4_group_t group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) ext4_grpblk_t overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) overhead = ext4_bg_num_gdb(sb, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) if (ext4_bg_has_super(sb, group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) overhead += 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) return overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
/* Membership tests against the half-open range [first, last). */
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
/*
 * Sanity-check the layout of one new group supplied by userspace: it
 * must be the group immediately after the current last one, and its
 * block bitmap, inode bitmap and inode table must all lie inside the
 * new group without overlapping each other or the superblock/GDT area
 * at the group's start.
 *
 * Returns 0 when the layout is acceptable, -EINVAL for a bad layout,
 * or the error from reading the group's last block.
 */
static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	/* The new group begins where the filesystem currently ends. */
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	/* Only appending right after the last existing group is supported. */
	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;	/* end of sb copy + GDT + reserved GDT */
	/* "- 2" accounts for the block bitmap and the inode bitmap. */
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	/* A non-zero offset means the current last group is not full. */
	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	/* Reading the group's last block proves the device is big enough. */
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;	/* every placement check passed */
	brelse(bh);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add
 * one flex group at a time.  Allocated by alloc_flex_gd(), released by
 * free_flex_gd().
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for each
						   group in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t count;			/* number of groups in @groups */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * @flexbg_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * Returns NULL on failure otherwise address of the allocated structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct ext4_new_flex_group_data *flex_gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (flex_gd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) flex_gd->count = flexbg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) flex_gd->groups = kmalloc_array(flexbg_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) sizeof(struct ext4_new_group_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (flex_gd->groups == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (flex_gd->bg_flags == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) return flex_gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) kfree(flex_gd->groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) kfree(flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
/* Release a structure obtained from alloc_flex_gd(). */
static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) * and inode tables for a flex group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * This function is used by 64bit-resize. Note that this function allocates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * group tables from the 1st group of groups contained by @flexgd, which may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * be a partial of a flex group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * @sb: super block of fs to which the groups belongs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * Returns 0 on a successful allocation of the metadata blocks in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * block group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static int ext4_alloc_group_tables(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct ext4_new_flex_group_data *flex_gd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) int flexbg_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) struct ext4_new_group_data *group_data = flex_gd->groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) ext4_fsblk_t start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) ext4_fsblk_t last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) ext4_group_t src_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) ext4_group_t bb_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) ext4_group_t ib_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) ext4_group_t it_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) ext4_group_t last_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) unsigned overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) BUG_ON(flex_gd->count == 0 || group_data == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) src_group = group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) last_group = src_group + flex_gd->count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) (last_group & ~(flexbg_size - 1))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) next_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) group = group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (src_group >= group_data[0].group + flex_gd->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) start_blk = ext4_group_first_block_no(sb, src_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) last_blk = start_blk + group_data[src_group - group].blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) overhead = ext4_group_overhead_blocks(sb, src_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) start_blk += overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) /* We collect contiguous blocks as much as possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) src_group++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) for (; src_group <= last_group; src_group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) overhead = ext4_group_overhead_blocks(sb, src_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (overhead == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) last_blk += group_data[src_group - group].blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /* Allocate block bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) for (; bb_index < flex_gd->count; bb_index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (start_blk >= last_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) goto next_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) group_data[bb_index].block_bitmap = start_blk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) group = ext4_get_group_number(sb, start_blk - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) group -= group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) group_data[group].mdata_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) flex_gd->bg_flags[group] &= uninit_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /* Allocate inode bitmaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) for (; ib_index < flex_gd->count; ib_index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (start_blk >= last_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) goto next_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) group_data[ib_index].inode_bitmap = start_blk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) group = ext4_get_group_number(sb, start_blk - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) group -= group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) group_data[group].mdata_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) flex_gd->bg_flags[group] &= uninit_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /* Allocate inode tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) for (; it_index < flex_gd->count; it_index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) ext4_fsblk_t next_group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (start_blk + itb > last_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) goto next_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) group_data[it_index].inode_table = start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) group = ext4_get_group_number(sb, start_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) next_group_start = ext4_group_first_block_no(sb, group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) group -= group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if (start_blk + itb > next_group_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) flex_gd->bg_flags[group + 1] &= uninit_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) overhead = start_blk + itb - next_group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) group_data[group + 1].mdata_blocks += overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) itb -= overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) group_data[group].mdata_blocks += itb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) flex_gd->bg_flags[group] &= uninit_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) start_blk += EXT4_SB(sb)->s_itb_per_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /* Update free clusters count to exclude metadata blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) for (i = 0; i < flex_gd->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) group_data[i].free_clusters_count -=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) EXT4_NUM_B2C(EXT4_SB(sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) group_data[i].mdata_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (test_opt(sb, DEBUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) group = group_data[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) "%d groups, flexbg size is %d:\n", flex_gd->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) flexbg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) for (i = 0; i < flex_gd->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) ext4_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) ext4_bg_has_super(sb, group + i) ? "normal" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) "no-super", group + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) group_data[i].blocks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) group_data[i].free_clusters_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) group_data[i].mdata_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) ext4_fsblk_t blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) bh = sb_getblk(sb, blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (unlikely(!bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) BUFFER_TRACE(bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if ((err = ext4_journal_get_write_access(handle, bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) bh = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) memset(bh->b_data, 0, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
/*
 * Make sure @handle has at least @credits journal credits, asking for
 * up to EXT4_MAX_TRANS_DATA credits when the handle must be extended.
 * Return value is passed through from ext4_journal_ensure_credits_fn().
 */
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
		EXT4_MAX_TRANS_DATA, 0, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) *
 * Helper function for ext4_setup_new_group_blocks() which sets the block
 * bitmap bits for the given cluster range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * @handle: journal handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * @flex_gd: flex group data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct ext4_new_flex_group_data *flex_gd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) ext4_group_t count = last_cluster - first_cluster + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) ext4_group_t count2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) last_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) for (count2 = count; count > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) count -= count2, first_cluster += count2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ext4_fsblk_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) group -= flex_gd->groups[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (count2 > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) count2 = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) BUG_ON(flex_gd->count > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) err = ext4_resize_ensure_credits_batch(handle, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (unlikely(!bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) BUFFER_TRACE(bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) err = ext4_journal_get_write_access(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) first_cluster, first_cluster - start, count2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) ext4_set_bits(bh->b_data, first_cluster - start, count2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) err = ext4_handle_dirty_metadata(handle, NULL, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem. We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follow:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in blocks bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	/* Blocks per group used by each table kind: block bitmap, inode
	 * bitmap, inode table — indexed in step 2 below. */
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	/* The new flex group must start exactly at the current end of fs. */
	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Step 1: per new group, write backup super/GDT copies (when the
	 * group carries them) and zero/initialize its group tables. */
	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		/* Without meta_bg, only groups holding a superblock backup
		 * also hold GDT backups. */
		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1) {
			ext4_group_t first_group;
			first_group = ext4_meta_bg_first_group(sb, group);
			if (first_group != group + 1 &&
			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
				goto handle_itb;
		}

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			/* Backup contents come from the j-th primary GDT
			 * block (RCU-protected array of buffer heads). */
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		/* Mark sb/GDT overhead blocks (if any) in use at the start
		 * of the group's bitmap. */
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0,
				      EXT4_NUM_B2C(sbi, overhead));
		}
		/* Mark bitmap bits beyond the group's blocks_count in use. */
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		/* (&group_data[i].block_bitmap)[j] indexes the j-th table
		 * location field of group i — assumes block_bitmap,
		 * inode_bitmap and inode_table are consecutive members. */
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			/* Coalesce physically contiguous table runs across
			 * groups into one bitmap update. */
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		/* Flush the final pending run. */
		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time. In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *cur;
	unsigned grp;
	int factor;

	if (!ext4_has_feature_sparse_super(sb)) {
		/* Non-sparse: every group is a backup; just count up. */
		grp = *three;
		*three += 1;
		return grp;
	}

	/* Sparse: pick the smallest of the three power sequences ... */
	cur = three;
	factor = 3;
	if (*five < *cur) {
		cur = five;
		factor = 5;
	}
	if (*seven < *cur) {
		cur = seven;
		factor = 7;
	}

	/* ... return it, and step that sequence to its next power. */
	grp = *cur;
	*cur *= factor;

	return grp;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * Check that all of the backup GDT blocks are held in the primary GDT block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * It is assumed that they are stored in group order. Returns the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * groups in current filesystem that have BACKUPS, or -ve error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static int verify_reserved_gdb(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ext4_group_t end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct buffer_head *primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) const ext4_fsblk_t blk = primary->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) unsigned three = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) unsigned five = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) unsigned seven = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) unsigned grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) __le32 *p = (__le32 *)primary->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int gdbackups = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (le32_to_cpu(*p++) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ext4_warning(sb, "reserved GDT %llu"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) " missing grp %d (%llu)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) blk, grp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) grp *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return gdbackups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * Called when we need to bring a reserved group descriptor table block into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * use from the resize inode. The primary copy of the new GDT block currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * is an indirect block (under the double indirect block in the resize inode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * The new backup GDT blocks will be stored as leaf blocks in this indirect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * block, in group order. Even though we know all the block numbers we need,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * we check to ensure that the resize inode has actually reserved these blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * Don't need to update the block bitmaps because the blocks are still in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * We get all of the error cases out of the way, so that we are sure to not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * fail once we start modifying the data on disk, because JBD has no rollback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int add_new_gdb(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ext4_group_t group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct ext4_super_block *es = EXT4_SB(sb)->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct buffer_head **o_group_desc, **n_group_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct buffer_head *dind = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct buffer_head *gdb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int gdbackups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct ext4_iloc iloc = { .bh = NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) __le32 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (test_opt(sb, DEBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) gdb_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) gdb_bh = ext4_sb_bread(sb, gdblock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (IS_ERR(gdb_bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return PTR_ERR(gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (gdbackups < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) err = gdbackups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (IS_ERR(dind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) err = PTR_ERR(dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dind = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) data = (__le32 *)dind->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ext4_warning(sb, "new group %u GDT block %llu not reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) group, gdblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) BUFFER_TRACE(gdb_bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) err = ext4_journal_get_write_access(handle, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) BUFFER_TRACE(dind, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) err = ext4_journal_get_write_access(handle, dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* ext4_reserve_inode_write() gets a reference on the iloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = ext4_reserve_inode_write(handle, inode, &iloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (!n_group_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ext4_warning(sb, "not enough memory for %lu groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) gdb_num + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * Finally, we have all of the possible failures behind us...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Remove new GDT block from inode double-indirect block and clear out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * the new GDT block for use (which also "frees" the backup GDT blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * from the reserved inode). We don't need to change the bitmaps for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * these blocks, because they are marked as in-use from being in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * reserved inode, and will become GDT blocks (primary and backup).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) err = ext4_handle_dirty_metadata(handle, NULL, dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) (9 - EXT4_SB(sb)->s_cluster_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ext4_mark_iloc_dirty(handle, inode, &iloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) memset(gdb_bh->b_data, 0, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) iloc.bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) brelse(dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) memcpy(n_group_desc, o_group_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) n_group_desc[gdb_num] = gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) EXT4_SB(sb)->s_gdb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ext4_kvfree_array_rcu(o_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) err = ext4_handle_dirty_super(handle, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) kvfree(n_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) brelse(iloc.bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) brelse(dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) brelse(gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ext4_debug("leaving with error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * add_new_gdb_meta_bg is the sister of add_new_gdb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static int add_new_gdb_meta_bg(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) handle_t *handle, ext4_group_t group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ext4_fsblk_t gdblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct buffer_head *gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct buffer_head **o_group_desc, **n_group_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) gdblock = ext4_meta_bg_first_block_no(sb, group) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ext4_bg_has_super(sb, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) gdb_bh = ext4_sb_bread(sb, gdblock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (IS_ERR(gdb_bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return PTR_ERR(gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!n_group_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) brelse(gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ext4_warning(sb, "not enough memory for %lu groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) gdb_num + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) memcpy(n_group_desc, o_group_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) n_group_desc[gdb_num] = gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) BUFFER_TRACE(gdb_bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) err = ext4_journal_get_write_access(handle, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) kvfree(n_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) brelse(gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) EXT4_SB(sb)->s_gdb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ext4_kvfree_array_rcu(o_group_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * Called when we are adding a new group which has a backup copy of each of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * We need to add these reserved backup GDT blocks to the resize inode, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * that they are kept for future resizing and not allocated to files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * Each reserved backup GDT block will go into a different indirect block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * The indirect blocks are actually the primary reserved GDT blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * so we know in advance what their block numbers are. We only get the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * double-indirect block to verify it is pointing to the primary reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * GDT blocks so we don't overwrite a data block by accident. The reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * backup GDT blocks are stored in their reserved primary GDT block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ext4_group_t group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct buffer_head **primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct buffer_head *dind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct ext4_iloc iloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ext4_fsblk_t blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) __le32 *data, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int gdbackups = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int res, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (!primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (IS_ERR(dind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) err = PTR_ERR(dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) dind = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto exit_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) EXT4_ADDR_PER_BLOCK(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* Get each reserved primary GDT block and verify it holds backups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (res = 0; res < reserved_gdb; res++, blk++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (le32_to_cpu(*data) != blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ext4_warning(sb, "reserved block %llu"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) " not at offset %ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) (long)(data - (__le32 *)dind->b_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) goto exit_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) primary[res] = ext4_sb_bread(sb, blk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (IS_ERR(primary[res])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) err = PTR_ERR(primary[res]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) primary[res] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto exit_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) gdbackups = verify_reserved_gdb(sb, group, primary[res]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (gdbackups < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) brelse(primary[res]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) err = gdbackups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto exit_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (++data >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) data = (__le32 *)dind->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) for (i = 0; i < reserved_gdb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) BUFFER_TRACE(primary[i], "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if ((err = ext4_journal_get_write_access(handle, primary[i])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) goto exit_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) goto exit_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * Finally we can add each of the reserved backup GDT blocks from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * the new group to its reserved primary GDT block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) blk = group * EXT4_BLOCKS_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) for (i = 0; i < reserved_gdb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) data = (__le32 *)primary[i]->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* printk("reserving backup %lu[%u] = %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) primary[i]->b_blocknr, gdbackups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) blk + primary[i]->b_blocknr); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ext4_mark_iloc_dirty(handle, inode, &iloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) exit_bh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) while (--res >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) brelse(primary[res]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) brelse(dind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) exit_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) kfree(primary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * Update the backup copies of the ext4 metadata. These don't need to be part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * of the main resize transaction, because e2fsck will re-write them if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * is a problem (basically only OOM will cause a problem). However, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * _should_ update the backups if possible, in case the primary gets trashed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * for some reason and we need to run e2fsck from a backup superblock. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * important part is that the new block and inode counts are in the backup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * superblocks, and the location of the new group metadata in the GDT backups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * We do not need take the s_resize_lock for this, because these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * blocks are not otherwise touched by the filesystem code when it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * mounted. We don't need to worry about last changing from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * sbi->s_groups_count, because the worst that can happen is that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * do not copy the full number of backups at this time. The resize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * which changed s_groups_count will backup again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int size, int meta_bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ext4_group_t last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) unsigned three = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) unsigned five = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) unsigned seven = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ext4_group_t group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) int rest = sb->s_blocksize - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int err = 0, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) group = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) err = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) goto exit_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (meta_bg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) group = ext4_list_backups(sb, &three, &five, &seven);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) last = sbi->s_groups_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) group = ext4_get_group_number(sb, blk_off) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) while (group < sbi->s_groups_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ext4_fsblk_t backup_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Out of journal space, and can't get more - abort - so sad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) err = ext4_resize_ensure_credits_batch(handle, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (meta_bg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) backup_block = (ext4_group_first_block_no(sb, group) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ext4_bg_has_super(sb, group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) bh = sb_getblk(sb, backup_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (unlikely(!bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ext4_debug("update metadata backup %llu(+%llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) backup_block, backup_block -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ext4_group_first_block_no(sb, group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) BUFFER_TRACE(bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if ((err = ext4_journal_get_write_access(handle, bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) memcpy(bh->b_data, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (rest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) memset(bh->b_data + size, 0, rest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) err = ext4_handle_dirty_metadata(handle, NULL, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (meta_bg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) group = ext4_list_backups(sb, &three, &five, &seven);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) else if (group == last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) group = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if ((err2 = ext4_journal_stop(handle)) && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * Ugh! Need to have e2fsck write the backup copies. It is too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * late to revert the resize, we shouldn't fail just because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * the backup copies (they are only needed in case of corruption).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * However, if we got here we have a journal problem too, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * can't really start a transaction to mark the superblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Chicken out and just set the flag on the hope it will be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * to disk, and if not - we will simply wait until next fsck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) exit_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ext4_warning(sb, "can't update backup for group %u (err %d), "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) "forcing fsck on next reboot", group, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) sbi->s_mount_state &= ~EXT4_VALID_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) mark_buffer_dirty(sbi->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * ext4_add_new_descs() adds @count group descriptor of groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * starting at @group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @handle: journal handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @group: the group no. of the first group desc to be added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * @resize_inode: the resize inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * @count: number of group descriptors to be added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ext4_group_t group, struct inode *resize_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ext4_group_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct buffer_head *gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int i, gdb_off, gdb_num, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int meta_bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) meta_bg = ext4_has_feature_meta_bg(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) for (i = 0; i < count; i++, group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int reserved_gdb = ext4_bg_has_super(sb, group) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * We will only either add reserved group blocks to a backup group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * or remove reserved blocks for the first group in a new group block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Doing both would be mean more complex code, and sane people don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * use non-sparse filesystems anymore. This is already checked above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (gdb_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) gdb_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) BUFFER_TRACE(gdb_bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) err = ext4_journal_get_write_access(handle, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) err = reserve_backup_gdb(handle, resize_inode, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) } else if (meta_bg != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err = add_new_gdb_meta_bg(sb, handle, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) err = add_new_gdb(handle, resize_inode, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct buffer_head *bh = sb_getblk(sb, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (unlikely(!bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (!bh_uptodate_or_lock(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (ext4_read_bh(bh, 0, NULL) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int ext4_set_bitmap_checksums(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ext4_group_t group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct ext4_group_desc *gdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct ext4_new_group_data *group_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (!ext4_has_metadata_csum(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) EXT4_INODES_PER_GROUP(sb) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) bh = ext4_get_bitmap(sb, group_data->block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ext4_block_bitmap_csum_set(sb, group, gdp, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct ext4_new_flex_group_data *flex_gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct ext4_new_group_data *group_data = flex_gd->groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct ext4_group_desc *gdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct buffer_head *gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) __u16 *bg_flags = flex_gd->bg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) int i, gdb_off, gdb_num, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) group = group_data->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* Update group descriptor block for new group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) gdb_off * EXT4_DESC_SIZE(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) memset(gdp, 0, EXT4_DESC_SIZE(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) ext4_inode_table_set(sb, gdp, group_data->inode_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ext4_free_group_clusters_set(sb, gdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) group_data->free_clusters_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (ext4_has_group_desc_csum(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) ext4_itable_unused_set(sb, gdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) EXT4_INODES_PER_GROUP(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) gdp->bg_flags = cpu_to_le16(*bg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) ext4_group_desc_csum_set(sb, group, gdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * We can allocate memory for mb_alloc based on the new group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) err = ext4_mb_add_groupinfo(sb, group, gdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * ext4_update_super() updates the super block so that the newly added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * groups can be seen by the filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * @sb: super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * @flex_gd: new added groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static void ext4_update_super(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct ext4_new_flex_group_data *flex_gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ext4_fsblk_t blocks_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ext4_fsblk_t free_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ext4_fsblk_t reserved_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct ext4_new_group_data *group_data = flex_gd->groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) BUG_ON(flex_gd->count == 0 || group_data == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * Make the new blocks and inodes valid next. We do this before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * increasing the group count so that once the group is enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * all of its blocks and inodes are already valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * We always allocate group-by-group, then block-by-block or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * inode-by-inode within a group, so enabling these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * blocks/inodes before the group is live won't actually let us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * allocate the new space yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < flex_gd->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) blocks_count += group_data[i].blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) reserved_blocks = ext4_r_blocks_count(es) * 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) reserved_blocks *= blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) do_div(reserved_blocks, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) flex_gd->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) flex_gd->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * We need to protect s_groups_count against other CPUs seeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * inconsistent state in the superblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * The precise rules we use are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * * Writers must perform a smp_wmb() after updating all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * dependent data and before modifying the groups count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * * Readers must perform an smp_rmb() after reading the groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * count and before reading any dependent data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * NB. These rules can be relaxed when checking the group count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * while freeing data, as we can only allocate from a block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * group after serialising against the group count, and we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * only then free after serialising in turn against that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Update the global fs size fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) sbi->s_groups_count += flex_gd->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* Update the reserved block counts only once the new group is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * active. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) reserved_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /* Update the free space counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) percpu_counter_add(&sbi->s_freeclusters_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) EXT4_NUM_B2C(sbi, free_blocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) percpu_counter_add(&sbi->s_freeinodes_counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) ext4_debug("free blocks count %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) percpu_counter_read(&sbi->s_freeclusters_counter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ext4_group_t flex_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct flex_groups *fg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) flex_group = ext4_flex_group(sbi, group_data[0].group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) &fg->free_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) &fg->free_inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * Update the fs overhead information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ext4_calculate_overhead(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (test_opt(sb, DEBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) printk(KERN_DEBUG "EXT4-fs: added group %u:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) blocks_count, free_blocks, reserved_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* Add a flex group to an fs. Ensure we handle all possible error conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * _before_ we start modifying the filesystem, because we cannot abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * transaction and not have it write the data to disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static int ext4_flex_group_add(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct inode *resize_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct ext4_new_flex_group_data *flex_gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ext4_fsblk_t o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) ext4_grpblk_t last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) unsigned reserved_gdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int err = 0, err2 = 0, credit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) o_blocks_count = ext4_blocks_count(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) BUG_ON(last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) err = setup_new_flex_group_blocks(sb, flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * We will always be modifying at least the superblock and GDT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * blocks. If we are adding a group past the last current GDT block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * we will also modify the inode and the dindirect block. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * are adding a group with superblock/GDT backups we will also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * modify each of the reserved GDT dindirect blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) credit = 3; /* sb, resize inode, resize inode dindirect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* GDT blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) credit += reserved_gdb; /* Reserved GDT dindirect blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) err = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) BUFFER_TRACE(sbi->s_sbh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) err = ext4_journal_get_write_access(handle, sbi->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) goto exit_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) group = flex_gd->groups[0].group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) BUG_ON(group != sbi->s_groups_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) err = ext4_add_new_descs(handle, sb, group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) resize_inode, flex_gd->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto exit_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) err = ext4_setup_new_descs(handle, sb, flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto exit_journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ext4_update_super(sb, flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) err = ext4_handle_dirty_super(handle, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) exit_journal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) err2 = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) int gdb_num_end = ((group + flex_gd->count - 1) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) EXT4_DESC_PER_BLOCK(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int meta_bg = ext4_has_feature_meta_bg(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) sector_t old_gdb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) sizeof(struct ext4_super_block), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) for (; gdb_num <= gdb_num_end; gdb_num++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct buffer_head *gdb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) gdb_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (old_gdb == gdb_bh->b_blocknr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) gdb_bh->b_size, meta_bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) old_gdb = gdb_bh->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static int ext4_setup_next_flex_gd(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct ext4_new_flex_group_data *flex_gd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ext4_fsblk_t n_blocks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) unsigned long flexbg_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct ext4_new_group_data *group_data = flex_gd->groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ext4_fsblk_t o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ext4_group_t n_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ext4_group_t last_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ext4_grpblk_t last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ext4_grpblk_t clusters_per_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) o_blocks_count = ext4_blocks_count(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (o_blocks_count == n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) BUG_ON(last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) last_group = group | (flexbg_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (last_group > n_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) last_group = n_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) flex_gd->count = last_group - group + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) for (i = 0; i < flex_gd->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) int overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) group_data[i].group = group + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) overhead = ext4_group_overhead_blocks(sb, group + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) group_data[i].mdata_blocks = overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (ext4_has_group_desc_csum(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) EXT4_BG_INODE_UNINIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (!test_opt(sb, INIT_INODE_TABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (last_group == n_group && ext4_has_group_desc_csum(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* We need to initialize block bitmap of last group. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if ((last_group == n_group) && (last != clusters_per_group - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) group_data[i - 1].free_clusters_count -= clusters_per_group -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) last - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* Add group descriptor data to an existing or new group descriptor block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * Ensure we handle all possible error conditions _before_ we start modifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * the filesystem, because we cannot abort the transaction and not have it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * write the data to disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * If we are on a GDT block boundary, we need to get the reserved GDT block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * Otherwise, we may need to add backup GDT blocks for a sparse group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * We only need to hold the superblock lock while we are actually adding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * in the new group's counts to the superblock. Prior to that we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * not really "added" the group at all. We re-check that we are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * adding in the last group in case things have changed since verifying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct ext4_new_flex_group_data flex_gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int gdb_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) __u16 bg_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ext4_warning(sb, "Can't resize non-sparse filesystem further");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (ext4_blocks_count(es) + input->blocks_count <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ext4_blocks_count(es)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) ext4_warning(sb, "blocks_count overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) le32_to_cpu(es->s_inodes_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ext4_warning(sb, "inodes_count overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (reserved_gdb || gdb_off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (!ext4_has_feature_resize_inode(sb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) !le16_to_cpu(es->s_reserved_gdt_blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) ext4_warning(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) "No reserved GDT blocks, can't resize");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (IS_ERR(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ext4_warning(sb, "Error opening resize inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) err = verify_group_input(sb, input);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) err = ext4_alloc_flex_bg_array(sb, input->group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) flex_gd.count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) flex_gd.groups = input;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) flex_gd.bg_flags = &bg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) err = ext4_flex_group_add(sb, inode, &flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) } /* ext4_group_add */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * extend a group without checking assuming that checking has been done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) static int ext4_group_extend_no_check(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct ext4_super_block *es = EXT4_SB(sb)->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) int err = 0, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* We will update the superblock, one block bitmap, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * one group descriptor via ext4_group_add_blocks().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) err = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ext4_warning(sb, "error %d on journal start", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ext4_warning(sb, "error %d on journal write access", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ext4_blocks_count_set(es, o_blocks_count + add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) o_blocks_count + add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* We add the blocks to the bitmap and set the group need init bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) ext4_handle_dirty_super(handle, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) o_blocks_count + add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) err2 = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (err2 && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (test_opt(sb, DEBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) "blocks\n", ext4_blocks_count(es));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) (char *)es, sizeof(struct ext4_super_block), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) * Extend the filesystem to the new number of blocks specified. This entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * point is only used to extend the current filesystem to the end of the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * for emergencies (because it has no dependencies on reserved blocks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ext4_fsblk_t n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ext4_fsblk_t o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ext4_grpblk_t last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) ext4_grpblk_t add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ext4_group_t group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) o_blocks_count = ext4_blocks_count(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (test_opt(sb, DEBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ext4_msg(sb, KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) "extending last group from %llu to %llu blocks",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) o_blocks_count, n_blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) ext4_msg(sb, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) "filesystem too large to resize to %llu blocks safely",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) n_blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (n_blocks_count < o_blocks_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) ext4_warning(sb, "can't shrink FS - resize aborted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* Handle the remaining blocks in the last group only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (last == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) ext4_warning(sb, "need to use ext2online to resize further");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) add = EXT4_BLOCKS_PER_GROUP(sb) - last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (o_blocks_count + add < o_blocks_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ext4_warning(sb, "blocks_count overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (o_blocks_count + add > n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) add = n_blocks_count - o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (o_blocks_count + add < n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) o_blocks_count + add, add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /* See if the device is actually as big as what was requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (IS_ERR(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) ext4_warning(sb, "can't read last block, resize aborted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) err = ext4_group_extend_no_check(sb, o_blocks_count, add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) } /* ext4_group_extend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * Release the resize inode and drop the resize_inode feature if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * are no more reserved gdt blocks, and then convert the file system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * to enable meta_bg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct ext4_inode_info *ei = EXT4_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ext4_fsblk_t nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) int i, ret, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) int credits = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (es->s_reserved_gdt_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ext4_error(sb, "Unexpected non-zero "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) "s_reserved_gdt_blocks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* Do a quick sanity check of the resize inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (inode->i_blocks != 1 << (inode->i_blkbits -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) (9 - sbi->s_cluster_bits)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) goto invalid_resize_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) for (i = 0; i < EXT4_N_BLOCKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (i == EXT4_DIND_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (ei->i_data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto invalid_resize_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (ei->i_data[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) goto invalid_resize_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) credits += 3; /* block bitmap, bg descriptor, resize inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) BUFFER_TRACE(sbi->s_sbh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) err = ext4_journal_get_write_access(handle, sbi->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ext4_clear_feature_resize_inode(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ext4_set_feature_meta_bg(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) sbi->s_es->s_first_meta_bg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) err = ext4_handle_dirty_super(handle, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) ext4_free_blocks(handle, inode, NULL, nr, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) EXT4_FREE_BLOCKS_METADATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) EXT4_FREE_BLOCKS_FORGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) ei->i_data[EXT4_DIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) inode->i_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) err = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ext4_std_error(sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) ret = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) invalid_resize_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ext4_error(sb, "corrupted/inconsistent resize inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * @sb: super block of the fs to be resized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * @n_blocks_count: the number of blocks resides in the resized fs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct ext4_new_flex_group_data *flex_gd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct ext4_super_block *es = sbi->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct inode *resize_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ext4_grpblk_t add, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) unsigned long n_desc_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) unsigned long o_desc_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) ext4_group_t o_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ext4_group_t n_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ext4_fsblk_t o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) ext4_fsblk_t n_blocks_count_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) unsigned long last_update_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int meta_bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /* See if the device is actually as big as what was requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (IS_ERR(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ext4_warning(sb, "can't read last block, resize aborted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) o_blocks_count = ext4_blocks_count(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) "to %llu blocks", o_blocks_count, n_blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (n_blocks_count < o_blocks_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /* On-line shrinking not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ext4_warning(sb, "can't shrink FS - resize aborted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (n_blocks_count == o_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Nothing need to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) n_group = ext4_get_group_number(sb, n_blocks_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) ext4_warning(sb, "resize would cause inodes_count overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) n_desc_blocks = num_desc_blocks(sb, n_group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) meta_bg = ext4_has_feature_meta_bg(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (ext4_has_feature_resize_inode(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (meta_bg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) ext4_error(sb, "resize_inode and meta_bg enabled "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) "simultaneously");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (n_desc_blocks > o_desc_blocks +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) le16_to_cpu(es->s_reserved_gdt_blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) n_blocks_count_retry = n_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) n_desc_blocks = o_desc_blocks +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) le16_to_cpu(es->s_reserved_gdt_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) n_blocks_count = (ext4_fsblk_t)n_group *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) EXT4_BLOCKS_PER_GROUP(sb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) le32_to_cpu(es->s_first_data_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) n_group--; /* set to last group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (!resize_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) EXT4_IGET_SPECIAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (IS_ERR(resize_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) ext4_warning(sb, "Error opening resize inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return PTR_ERR(resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) err = ext4_convert_meta_bg(sb, resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (resize_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) iput(resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) resize_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (n_blocks_count_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) n_blocks_count = n_blocks_count_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) n_blocks_count_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * Make sure the last group has enough space so that it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * guaranteed to have enough space for all metadata blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * that it might need to hold. (We might not need to store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * the inode table blocks in the last block group, but there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * will be cases where this might be needed.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if ((ext4_group_first_block_no(sb, n_group) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) ext4_group_overhead_blocks(sb, n_group) + 2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) n_blocks_count = ext4_group_first_block_no(sb, n_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) n_group--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) n_blocks_count_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (resize_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) iput(resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) resize_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /* extend the last group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (n_group == o_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) add = n_blocks_count - o_blocks_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (add > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) err = ext4_group_extend_no_check(sb, o_blocks_count, add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (ext4_blocks_count(es) == n_blocks_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) err = ext4_alloc_flex_bg_array(sb, n_group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) flex_gd = alloc_flex_gd(flexbg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (flex_gd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /* Add flex groups. Note that a regular group is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * flex group with 1 group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) flexbg_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (jiffies - last_update_time > HZ * 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (last_update_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) ext4_msg(sb, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) "resized to %llu blocks",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) ext4_blocks_count(es));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) last_update_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) err = ext4_flex_group_add(sb, resize_inode, flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (!err && n_blocks_count_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) n_blocks_count = n_blocks_count_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) n_blocks_count_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) free_flex_gd(flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) flex_gd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (resize_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) iput(resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) resize_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (flex_gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) free_flex_gd(flex_gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (resize_inode != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) iput(resize_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) ext4_warning(sb, "error (%d) occurred during "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) "file system resize", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ext4_blocks_count(es));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }