// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
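/*
 * How these combine (a summary of how the split code consumes them):
 * a split at logical block "split" cuts an extent into a first half
 * [ee_block, split) and a second half [split, end].  MARK_UNWRIT1/2
 * select which half is marked unwritten, DATA_VALID1/2 record which
 * half holds valid data, and MAY_ZEROOUT permits falling back to
 * zeroing out the extent when the split itself fails with ENOSPC.
 */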

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
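
/*
 * A minimal caller sketch (illustrative only, not a verbatim caller from
 * this file): while holding i_data_sem and modifying the tree, top up the
 * journal credits before each step and remember that a restart means the
 * semaphore was dropped and re-taken, so cached tree state may be stale:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed,
 *					  needed, revoke);
 *	if (err < 0)
 *		goto out;	// fatal journal error
 *	// err == 1: the transaction was restarted; revalidate any
 *	// extent path cached across the call before reusing it.
 */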

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, path->p_bh);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extent updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a metadata block.
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

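/*
 * The four helpers below compute how many entries fit in a tree node:
 * usable space divided by the entry size, for on-disk blocks and for the
 * root node living in the inode's i_data.  As a worked example (assuming
 * the common 12-byte header and 12-byte entry sizes, and ignoring the
 * optional checksum tail): a 4KiB block holds (4096 - 12) / 12 = 340
 * entries, while the 60-byte i_data root holds (60 - 12) / 12 = 4.
 * Under AGGRESSIVE_TEST the capacities are clamped to tiny values so
 * that the tree growing/shrinking paths get exercised on small files.
 */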
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

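/*
 * Validate every entry in one tree node: the first entry's logical block
 * must match the parent index that points here (except for the root,
 * which has no parent), each extent/index must pass the per-entry block
 * checks, and logical blocks must stay strictly increasing so entries
 * never overlap.  On an overlap failure, *pblk is set to the offending
 * entry's physical block for error reporting.
 */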
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t prev = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			prev = lblock + ext4_ext_get_actual_len(ext) - 1;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			prev = lblock;
		}
	}
	return 1;
}

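/*
 * Sanity-check one extent tree node header before trusting its contents:
 * magic, depth, eh_max against the capacity computed for this node type,
 * eh_entries against eh_max, per-entry validity, and (for nodes read from
 * disk, i.e. non-root) the block checksum.  Returns 0 if the node looks
 * sane, -EFSCORRUPTED or -EFSBADCRC otherwise, after logging the details.
 */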
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

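/*
 * Populate the extent status tree from one leaf node: every on-disk
 * extent is cached as written or unwritten, and any gap between two
 * consecutive extents is cached as a hole, so later lookups in that
 * range do not have to re-read the tree.
 */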
static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree.
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

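/*
 * Release the buffer heads pinned by a path walk.  Safe to call on a
 * NULL path; the p_bh pointers are cleared so a later drop is a no-op.
 */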
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block.
 * The header must be checked before calling this.
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

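	/*
	 * The loop maintains that every index before 'l' starts at or
	 * below 'block', so l - 1 is the last index whose ei_block is
	 * <= block.  E.g. (hypothetical) for index entries starting at
	 * 0, 100 and 200, a search for block 150 ends with p_idx on
	 * the entry for 100.
	 */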
	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for the closest extent of the given block.
 * The header must be checked before calling this.
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we can get such a leaf in the split/add case
		 */
		return;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ext_debug(inode, "binsearch for %u: ", block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) l = EXT_FIRST_EXTENT(eh) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) r = EXT_LAST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) while (l <= r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) m = l + (r - l) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (block < le32_to_cpu(m->ee_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) r = m - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) l = m + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) r, le32_to_cpu(r->ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) path->p_ext = l - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ext_debug(inode, " -> %d:%llu:[%d]%d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) le32_to_cpu(path->p_ext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ext4_ext_pblock(path->p_ext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ext4_ext_is_unwritten(path->p_ext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ext4_ext_get_actual_len(path->p_ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) #ifdef CHECK_BINSEARCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct ext4_extent *chex, *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) chex = ex = EXT_FIRST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) BUG_ON(k && le32_to_cpu(ex->ee_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) <= le32_to_cpu(ex[-1].ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (block < le32_to_cpu(ex->ee_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) chex = ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) BUG_ON(chex != path->p_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
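/*
 * A minimal model of the search above, assuming a plain ascending array
 * of starting blocks whose first entry is <= the target block
 * (demo_floor_find is a hypothetical name, not an ext4 function). The
 * loop keeps the invariant "every entry left of l starts at or before
 * block", so l - 1 is the last entry with start <= block -- exactly the
 * extent that p_ext ends up pointing at.
 */
#if 0
static int demo_floor_find(const unsigned int *start, int nr,
			   unsigned int block)
{
	int l = 1, r = nr - 1;

	while (l <= r) {
		int m = l + (r - l) / 2;

		if (block < start[m])
			r = m - 1;
		else
			l = m + 1;
	}
	return l - 1;	/* last index i with start[i] <= block */
}
#endif
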
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) eh = ext_inode_hdr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) eh->eh_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) eh->eh_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) eh->eh_magic = EXT4_EXT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) eh->eh_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
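/*
 * After this init the in-inode root describes an empty, depth-0 tree.
 * A sketch of the properties a caller may rely on (demo_root_is_empty
 * is hypothetical, not an ext4 helper):
 */
#if 0
static bool demo_root_is_empty(const struct ext4_extent_header *eh)
{
	return eh->eh_magic == EXT4_EXT_MAGIC &&
	       eh->eh_depth == 0 &&	/* the root itself is the leaf */
	       eh->eh_entries == 0;	/* and it holds no extents yet */
}
#endif
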
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct ext4_ext_path *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ext4_find_extent(struct inode *inode, ext4_lblk_t block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct ext4_ext_path **orig_path, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) short int depth, i, ppos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) gfp_t gfp_flags = GFP_NOFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (flags & EXT4_EX_NOFAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) gfp_flags |= __GFP_NOFAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) eh = ext_inode_hdr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (depth > path[0].p_maxdepth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *orig_path = path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!path) {
		/* account for a possible depth increase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (unlikely(!path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) path[0].p_maxdepth = depth + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) path[0].p_hdr = eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) path[0].p_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) i = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ext4_cache_extents(inode, eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* walk through the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) while (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ext_debug(inode, "depth %d: num %d, max %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ext4_ext_binsearch_idx(inode, path + ppos, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) path[ppos].p_depth = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) path[ppos].p_ext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (IS_ERR(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ret = PTR_ERR(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) eh = ext_block_hdr(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ppos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) path[ppos].p_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) path[ppos].p_hdr = eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) path[ppos].p_depth = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) path[ppos].p_ext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) path[ppos].p_idx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* find extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ext4_ext_binsearch(inode, path + ppos, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* if not an empty leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (path[ppos].p_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ext4_ext_show_path(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (orig_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) *orig_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
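/*
 * The returned array holds one element per level: path[0] is the root,
 * path[depth] the leaf. A sketch of typical caller usage, with
 * use_extent() standing in for whatever the caller does (hypothetical;
 * error handling trimmed):
 */
#if 0
static void demo_lookup(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_ext_path *path;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return;
	if (path[ext_depth(inode)].p_ext)	/* NULL: leaf is empty */
		use_extent(path[ext_depth(inode)].p_ext);
	ext4_ext_drop_refs(path);
	kfree(path);
}
#endif
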
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
 * ext4_ext_insert_index:
 * inserts a new index [@logical; @ptr] into the block at @curp,
 * checking whether to insert before or after @curp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct ext4_ext_path *curp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int logical, ext4_fsblk_t ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct ext4_extent_idx *ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int len, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) err = ext4_ext_get_access(handle, inode, curp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) "logical %d == ei_block %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) logical, le32_to_cpu(curp->p_idx->ei_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) >= le16_to_cpu(curp->p_hdr->eh_max))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) "eh_entries %d >= eh_max %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) le16_to_cpu(curp->p_hdr->eh_entries),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) le16_to_cpu(curp->p_hdr->eh_max));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* insert after */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ext_debug(inode, "insert new index %d after: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) logical, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ix = curp->p_idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* insert before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ext_debug(inode, "insert new index %d before: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) logical, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ix = curp->p_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) BUG_ON(len < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ext_debug(inode, "insert new index %d: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) "move %d indices from 0x%p to 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) logical, len, ix, ix + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ix->ei_block = cpu_to_le32(logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ext4_idx_store_pblock(ix, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) le16_add_cpu(&curp->p_hdr->eh_entries, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) err = ext4_ext_dirty(handle, inode, curp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ext4_std_error(inode->i_sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
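/*
 * The heart of the insert above is a classic sorted-array insertion:
 * memmove() shifts the tail one slot up, then the new element is written
 * into the hole. A userspace-style model (demo_insert_at hypothetical):
 */
#if 0
static void demo_insert_at(unsigned int *arr, int nr, int pos,
			   unsigned int val)
{
	/* shift arr[pos..nr-1] to arr[pos+1..nr]; the regions overlap */
	memmove(arr + pos + 1, arr + pos, (nr - pos) * sizeof(*arr));
	arr[pos] = val;
}
#endif
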
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * ext4_ext_split:
 * inserts a new subtree into the path, using the free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - decides where to split
 * - moves the remaining extents and index entries (to the right of
 *   the split point) into the newly allocated blocks
 * - initializes the subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int ext4_ext_split(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct ext4_extent *newext, int at)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct ext4_extent_header *neh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct ext4_extent_idx *fidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int i = at, k, m, a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ext4_fsblk_t newblock, oldblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) __le32 border;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) gfp_t gfp_flags = GFP_NOFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) size_t ext_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (flags & EXT4_EX_NOFAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) gfp_flags |= __GFP_NOFAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
	/* decide where to split */
	/* FIXME: for now the decision is the simplest: at the current extent */

	/* if the current leaf is to be split, we should use the
	 * border from the split point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) border = path[depth].p_ext[1].ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ext_debug(inode, "leaf will be split."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) " next leaf starts at %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) le32_to_cpu(border));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) border = newext->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ext_debug(inode, "leaf will be added."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) " next leaf starts at %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) le32_to_cpu(border));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
	/*
	 * If an error occurs, we stop processing and mark the
	 * filesystem read-only. The index won't be inserted and
	 * the tree will remain in a consistent state. The next
	 * mount will repair the buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need it to handle errors and to free the
	 * allocated blocks on failure.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (!ablocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* allocate all needed blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) for (a = 0; a < depth - at; a++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) newblock = ext4_ext_new_meta_block(handle, inode, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) newext, &err, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (newblock == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ablocks[a] = newblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* initialize new leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) newblock = ablocks[--a];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (unlikely(newblock == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) EXT4_ERROR_INODE(inode, "newblock == 0!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (unlikely(!bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) err = ext4_journal_get_create_access(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) neh = ext_block_hdr(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) neh->eh_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) neh->eh_magic = EXT4_EXT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) neh->eh_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) neh->eh_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* move remainder of path[depth] to the new leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (unlikely(path[depth].p_hdr->eh_entries !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) path[depth].p_hdr->eh_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) path[depth].p_hdr->eh_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) path[depth].p_hdr->eh_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
	/* start the copy from the next extent; m counts the
	 * extents to the right of the split point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ext4_ext_show_move(inode, path, newblock, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ex = EXT_FIRST_EXTENT(neh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) le16_add_cpu(&neh->eh_entries, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* zero out unused area in the extent block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ext_size = sizeof(struct ext4_extent_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ext4_extent_block_csum_set(inode, neh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) err = ext4_handle_dirty_metadata(handle, inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* correct old leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* create intermediate indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) k = depth - at - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (unlikely(k < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) EXT4_ERROR_INODE(inode, "k %d < 0!", k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert the new index into the current index block;
	 * the current depth is stored in the variable i */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) i = depth - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) while (k--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) oldblock = newblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) newblock = ablocks[--a];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) bh = sb_getblk(inode->i_sb, newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (unlikely(!bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) err = ext4_journal_get_create_access(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) neh = ext_block_hdr(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) neh->eh_entries = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) neh->eh_magic = EXT4_EXT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) neh->eh_depth = cpu_to_le16(depth - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) neh->eh_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) fidx = EXT_FIRST_INDEX(neh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) fidx->ei_block = border;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ext4_idx_store_pblock(fidx, oldblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) i, newblock, le32_to_cpu(border), oldblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* move remainder of path[i] to the new index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) EXT_LAST_INDEX(path[i].p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) le32_to_cpu(path[i].p_ext->ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
		/* start copying indexes; m counts the indexes to the
		 * right of the split point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) EXT_MAX_INDEX(path[i].p_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ext4_ext_show_move(inode, path, newblock, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) memmove(++fidx, path[i].p_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) sizeof(struct ext4_extent_idx) * m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) le16_add_cpu(&neh->eh_entries, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* zero out unused area in the extent block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ext_size = sizeof(struct ext4_extent_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) memset(bh->b_data + ext_size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) inode->i_sb->s_blocksize - ext_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ext4_extent_block_csum_set(inode, neh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err = ext4_handle_dirty_metadata(handle, inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* correct old index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) err = ext4_ext_get_access(handle, inode, path + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err = ext4_ext_dirty(handle, inode, path + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /* insert new index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) err = ext4_ext_insert_index(handle, inode, path + at,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) le32_to_cpu(border), newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (buffer_locked(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* free all allocated blocks in error case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) for (i = 0; i < depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (!ablocks[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) EXT4_FREE_BLOCKS_METADATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) kfree(ablocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
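/*
 * At every level the split follows the same pattern seen above: copy the
 * tail of the old node into the freshly allocated block, then shrink the
 * old node's eh_entries by the moved count m. A minimal model over plain
 * arrays (demo_split_node is hypothetical):
 */
#if 0
static int demo_split_node(unsigned int *old, int nr, int split,
			   unsigned int *dst)
{
	int m = nr - split;	/* entries split..nr-1 move out */

	memcpy(dst, old + split, m * sizeof(*old));
	return nr - m;		/* entries remaining in the old node */
}
#endif
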
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * ext4_ext_grow_indepth:
 * implements the tree-growing procedure:
 * - allocates a new block
 * - moves the top-level data (index block or leaf) into the new block
 * - initializes the new top level, creating an index that points to
 *   the just-created block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct ext4_extent_header *neh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ext4_fsblk_t newblock, goal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) size_t ext_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
	/* Try to place the new block right before the old first index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (ext_depth(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (goal > le32_to_cpu(es->s_first_data_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) flags |= EXT4_MB_HINT_TRY_GOAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goal--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goal = ext4_inode_to_goal_block(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) NULL, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (newblock == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (unlikely(!bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) err = ext4_journal_get_create_access(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ext_size = sizeof(EXT4_I(inode)->i_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* move top-level index/leaf into new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* zero out unused area in the extent block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* set size of new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) neh = ext_block_hdr(bh);
	/* the old root could have indexes or leaves,
	 * so calculate eh_max the right way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (ext_depth(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) neh->eh_magic = EXT4_EXT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) ext4_extent_block_csum_set(inode, neh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) err = ext4_handle_dirty_metadata(handle, inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
	/* Update the top-level index: num, max, pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) neh = ext_inode_hdr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) neh->eh_entries = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (neh->eh_depth == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* Root extent block becomes index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) EXT_FIRST_INDEX(neh)->ei_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) EXT_FIRST_EXTENT(neh)->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) le16_add_cpu(&neh->eh_depth, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) err = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
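/*
 * Growing in depth never rebalances anything: the whole old root is
 * copied into one new block and the root is rewritten as a single index
 * pointing at that block, so the tree gains one level and the root gains
 * free slots. Schematically:
 *
 *	[root: N entries]  ==>  [root: 1 index] --> [new block: N entries]
 *
 * A toy model of the same move (struct demo_node and demo_grow are
 * hypothetical, not ext4 structures):
 */
#if 0
struct demo_node {
	int depth, nr;
	unsigned int slot[4];	/* arbitrary fanout for the model */
};

static void demo_grow(struct demo_node *root, struct demo_node *child,
		      unsigned int child_blk)
{
	*child = *root;		/* old top level moves into the new block */
	root->nr = 1;		/* root shrinks to a single index entry */
	root->slot[0] = child_blk;
	root->depth = child->depth + 1;
}
#endif
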
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * ext4_ext_create_new_leaf:
 * finds an empty index slot and adds a new leaf;
 * if no free index is found, it requests growing the tree in depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) unsigned int mb_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) unsigned int gb_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct ext4_extent *newext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct ext4_ext_path *curp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) int depth, i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) i = depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
	/* walk up the tree and look for a free index entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) curp = path + depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) curp--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
	/* we use an already-allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found an index with a free entry, use that
		 * entry: create all the needed subtree and add the new leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* refill path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) path = ext4_find_extent(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) (ext4_lblk_t)le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) ppath, gb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) err = PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* tree is full, time to grow in depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) err = ext4_ext_grow_indepth(handle, inode, mb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /* refill path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) path = ext4_find_extent(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) (ext4_lblk_t)le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ppath, gb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (IS_ERR(path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) err = PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /*
		 * only the first grow (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* now we need to split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
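/*
 * The overall control flow above, restated as pseudocode (all helper
 * names hypothetical, path refilling and error handling trimmed): find
 * the lowest level with a free index slot; split from there if one
 * exists, otherwise grow the root and retry.
 */
#if 0
	for (;;) {
		level = lowest_level_with_free_index(path);
		if (level >= 0)
			return split_from(path, level);	/* makes room */
		grow_root(inode);	/* adds a level, frees root slots */
		refill(path);
	}
#endif
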
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
 * Search for the closest allocated block to the left of *logical
 * and return it at @logical together with its physical address at @phys.
 * If *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * The return value is 0 (success) or an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static int ext4_ext_search_left(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ext4_lblk_t *logical, ext4_fsblk_t *phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct ext4_extent_idx *ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int depth, ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (unlikely(path == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) *phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (depth == 0 && path->p_ext == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (*logical < le32_to_cpu(ex->ee_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) *logical, le32_to_cpu(ex->ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) while (--depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ix = path[depth].p_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) "logical %d < ee_block %d + ee_len %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) *logical, le32_to_cpu(ex->ee_block), ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) *phys = ext4_ext_pblock(ex) + ee_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
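/*
 * In the common case the left neighbour of *logical is simply the last
 * block of the extent found in the path. A model of the computation at
 * the end of the function (plain values, not on-disk structures):
 *
 *	the extent covers [ee_block, ee_block + ee_len - 1], hence
 *	*logical = ee_block + ee_len - 1;	last covered logical block
 *	*phys    = ee_start + ee_len - 1;	matching physical block
 */
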
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
 * Search for the closest allocated block to the right of *logical
 * and return it at @logical together with its physical address at @phys.
 * If none exists, return 0 and set @phys to 0. Return 1 when an
 * allocated block is found, in which case *ret_ex is valid.
 * Otherwise return a negative (< 0) error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static int ext4_ext_search_right(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ext4_lblk_t *logical, ext4_fsblk_t *phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct ext4_extent *ret_ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct ext4_extent_idx *ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) int depth; /* Note, NOT eh_depth; depth from top of tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (unlikely(path == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) *phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (depth == 0 && path->p_ext == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (*logical < le32_to_cpu(ex->ee_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) "first_extent(path[%d].p_hdr) != ex",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) while (--depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ix = path[depth].p_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) "ix != EXT_FIRST_INDEX *logical %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) *logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) goto found_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) "logical %d < ee_block %d + ee_len %d!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) *logical, le32_to_cpu(ex->ee_block), ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* next allocated block in this leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ex++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) goto found_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /* go up and search for index to the right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) while (--depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ix = path[depth].p_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) goto got_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* we've gone up to the root and found no index to the right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) got_index:
	/* we've found an index to the right; follow it and
	 * find the closest allocated block to the right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ix++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) while (++depth < path->p_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* subtract from p_depth to get proper eh_depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (IS_ERR(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return PTR_ERR(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) eh = ext_block_hdr(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) ix = EXT_FIRST_INDEX(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) put_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (IS_ERR(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return PTR_ERR(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) eh = ext_block_hdr(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ex = EXT_FIRST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) found_extent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) *logical = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) *phys = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (ret_ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) *ret_ex = *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) put_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
/*
 * ext4_ext_next_allocated_block:
 * returns the first allocated block in the subsequent extent, or
 * EXT_MAX_BLOCKS if there is none.
 * NOTE: it treats the block number from an index entry as an
 * allocated block, so index entries have to be consistent with
 * the leaves.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ext4_lblk_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) ext4_ext_next_allocated_block(struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) BUG_ON(path == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (depth == 0 && path->p_ext == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) while (depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct ext4_ext_path *p = &path[depth];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (depth == path->p_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return le32_to_cpu(p->p_ext[1].ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return le32_to_cpu(p->p_idx[1].ei_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
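
/*
 * Illustrative sketch only (a hypothetical helper, not used elsewhere in
 * this file): a typical caller looks up a path for @lblk and then asks
 * where the next allocation begins, releasing the path afterwards. The
 * real callers hold i_data_sem around the lookup; error handling here is
 * trimmed to the essentials.
 */
static ext4_lblk_t __maybe_unused
ext4_ext_sketch_next_alloc(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_ext_path *path;
	ext4_lblk_t next;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return EXT_MAX_BLOCKS;
	/* index entries count as allocated, per the NOTE above */
	next = ext4_ext_next_allocated_block(path);
	ext4_ext_drop_refs(path);
	kfree(path);
	return next;
}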
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
/*
 * ext4_ext_next_leaf_block:
 * returns the first allocated block of the next leaf, or EXT_MAX_BLOCKS
 * if there is no next leaf.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) BUG_ON(path == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
	/* a depth-0 tree keeps all extents in the inode; no leaf blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (depth == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /* go to index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) while (depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (path[depth].p_idx !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) EXT_LAST_INDEX(path[depth].p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return (ext4_lblk_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) le32_to_cpu(path[depth].p_idx[1].ei_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) int depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) __le32 border;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int k, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (unlikely(ex == NULL || eh == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "ex %p == NULL or eh %p == NULL", ex, eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (depth == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /* there is no tree at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we only correct the tree if the first extent was modified */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
	/*
	 * TODO: we need a correction if the border is smaller than the
	 * current one
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) k = depth - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) border = path[depth].p_ext->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) err = ext4_ext_get_access(handle, inode, path + k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) path[k].p_idx->ei_block = border;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) err = ext4_ext_dirty(handle, inode, path + k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) while (k--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /* change all left-side indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) err = ext4_ext_get_access(handle, inode, path + k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) path[k].p_idx->ei_block = border;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) err = ext4_ext_dirty(handle, inode, path + k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
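
/*
 * Worked example (for illustration): suppose the first extent of a leaf
 * had its ee_block lowered from 100 to 96. The parent index entry that
 * points at this leaf is still keyed at 100 and must be rewritten to 96;
 * if that index entry is itself the first in its block, the same
 * correction propagates one more level up, and so on towards the root.
 */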
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static int ext4_can_extents_be_merged(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct ext4_extent *ex1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct ext4_extent *ex2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) unsigned short ext1_ee_len, ext2_ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) ext1_ee_len = ext4_ext_get_actual_len(ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ext2_ee_len = ext4_ext_get_actual_len(ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) le32_to_cpu(ex2->ee_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (ext4_ext_is_unwritten(ex1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) #ifdef AGGRESSIVE_TEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (ext1_ee_len >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
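
/*
 * Illustrative sketch only (hypothetical helper, not used by the code
 * above): two dummy extents that are logically and physically contiguous,
 * both written, and short enough that their combined length stays within
 * EXT_INIT_MAX_LEN, so the check above reports them as mergeable.
 */
static int __maybe_unused
ext4_ext_sketch_merge_check(struct inode *inode)
{
	struct ext4_extent ex1, ex2;

	ex1.ee_block = cpu_to_le32(0);
	ex1.ee_len = cpu_to_le16(8);
	ext4_ext_store_pblock(&ex1, 1000);

	/* starts exactly where ex1 ends, logically (8) and physically (1008) */
	ex2.ee_block = cpu_to_le32(8);
	ex2.ee_len = cpu_to_le16(8);
	ext4_ext_store_pblock(&ex2, 1008);

	return ext4_can_extents_be_merged(inode, &ex1, &ex2);	/* 1 */
}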
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards the right. If you want to merge towards
 * the left, pass "ex - 1" as the argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they were.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) static int ext4_ext_try_to_merge_right(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) struct ext4_extent *ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) unsigned int depth, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int merge_done = 0, unwritten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) BUG_ON(path[depth].p_hdr == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) while (ex < EXT_LAST_EXTENT(eh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* merge with next extent! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) unwritten = ext4_ext_is_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) + ext4_ext_get_actual_len(ex + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (ex + 1 < EXT_LAST_EXTENT(eh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) len = (EXT_LAST_EXTENT(eh) - ex - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * sizeof(struct ext4_extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) memmove(ex + 1, ex + 2, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) le16_add_cpu(&eh->eh_entries, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) merge_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) WARN_ON(eh->eh_entries == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (!eh->eh_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return merge_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
/*
 * This function does a very simple check to see if an extent tree with a
 * single leaf block can be collapsed into the inode's extent tree root.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static void ext4_ext_try_to_merge_up(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) size_t s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) unsigned max_root = ext4_ext_space_root(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) ext4_fsblk_t blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if ((path[0].p_depth != 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * We need to modify the block allocation bitmap and the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * group descriptor to release the extent tree block. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * can't get the journal credits, give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (ext4_journal_extend(handle, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * Copy the extent data up to the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) blk = ext4_idx_pblock(path[0].p_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) s = le16_to_cpu(path[1].p_hdr->eh_entries) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) sizeof(struct ext4_extent_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) s += sizeof(struct ext4_extent_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) path[1].p_maxdepth = path[0].p_maxdepth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) memcpy(path[0].p_hdr, path[1].p_hdr, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) path[0].p_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) path[0].p_hdr->eh_max = cpu_to_le16(max_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) brelse(path[1].p_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) ext4_free_blocks(handle, inode, NULL, blk, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
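
/*
 * Worked example (for illustration): a depth-1 tree whose root holds a
 * single index entry, pointing at a leaf with few enough extents to fit
 * in the inode's root header, is flattened back to depth 0: the leaf's
 * entries are copied up into the inode and the leaf block is freed.
 */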
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * This function tries to merge the @ex extent to neighbours in the tree, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * tries to collapse the extent tree into the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static void ext4_ext_try_to_merge(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) struct ext4_extent *ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) unsigned int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) int merge_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) BUG_ON(path[depth].p_hdr == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (ex > EXT_FIRST_EXTENT(eh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (!merge_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) (void) ext4_ext_try_to_merge_right(inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) ext4_ext_try_to_merge_up(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If an overlap is discovered, it updates the length of newext
 * so that there will be no overlap, and then returns 1.
 * If no overlap is found, it returns 0.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct ext4_extent *newext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) ext4_lblk_t b1, b2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) unsigned int depth, len1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) b1 = le32_to_cpu(newext->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) len1 = ext4_ext_get_actual_len(newext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (!path[depth].p_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * get the next allocated block if the extent in the path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * is before the requested block(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (b2 < b1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) b2 = ext4_ext_next_allocated_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (b2 == EXT_MAX_BLOCKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) b2 = EXT4_LBLK_CMASK(sbi, b2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
	/* check for wrap through zero on extent logical start block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (b1 + len1 < b1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) len1 = EXT_MAX_BLOCKS - b1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) newext->ee_len = cpu_to_le16(len1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /* check for overlap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (b1 + len1 > b2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) newext->ee_len = cpu_to_le16(b2 - b1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
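
/*
 * Worked example (for illustration): if newext covers logical blocks
 * [100, 120) (b1 = 100, len1 = 20) and the next allocated block turns
 * out to be b2 = 110, the check above trims newext to ee_len = 10 so
 * that it ends exactly where the existing allocation begins.
 */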
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into an existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct ext4_extent *newext, int gb_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) struct ext4_extent *ex, *fex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct ext4_extent *nearex; /* nearest extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct ext4_ext_path *npath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) int depth, len, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) ext4_lblk_t next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) int mb_flags = 0, unwritten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) mb_flags |= EXT4_MB_DELALLOC_RESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (unlikely(path[depth].p_hdr == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* try to insert block into found extent and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * Try to see whether we should rather test the extent on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * right from ex, or from the left of ex. This is because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * ext4_find_extent() can return either extent on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * left, or on the right from the searched position. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * will make merging more effective.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (ex < EXT_LAST_EXTENT(eh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) (le32_to_cpu(ex->ee_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) ext4_ext_get_actual_len(ex) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) le32_to_cpu(newext->ee_block))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ex += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) goto prepend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) (le32_to_cpu(newext->ee_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) ext4_ext_get_actual_len(newext) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) le32_to_cpu(ex->ee_block)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) ex -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /* Try to append newex to the ex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (ext4_can_extents_be_merged(inode, ex, newext)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) "(from %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ext4_ext_get_actual_len(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) le32_to_cpu(ex->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ext4_ext_is_unwritten(ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ext4_ext_get_actual_len(ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) ext4_ext_pblock(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) err = ext4_ext_get_access(handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) unwritten = ext4_ext_is_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) + ext4_ext_get_actual_len(newext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) nearex = ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto merge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) prepend:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) /* Try to prepend newex to the ex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (ext4_can_extents_be_merged(inode, newext, ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) "(from %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) ext4_ext_get_actual_len(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) le32_to_cpu(ex->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) ext4_ext_is_unwritten(ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) ext4_ext_get_actual_len(ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) ext4_ext_pblock(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) err = ext4_ext_get_access(handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) unwritten = ext4_ext_is_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) ex->ee_block = newext->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) + ext4_ext_get_actual_len(newext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) nearex = ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) goto merge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) goto has_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
	/* perhaps the next leaf has space for us? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) fex = EXT_LAST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) next = EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) next = ext4_ext_next_leaf_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (next != EXT_MAX_BLOCKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) ext_debug(inode, "next leaf block - %u\n", next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) BUG_ON(npath != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) npath = ext4_find_extent(inode, next, NULL, gb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (IS_ERR(npath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return PTR_ERR(npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) BUG_ON(npath->p_depth != path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) eh = npath[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ext_debug(inode, "next leaf isn't full(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) le16_to_cpu(eh->eh_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) path = npath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) goto has_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ext_debug(inode, "next leaf has no free space(%d,%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
	/*
	 * There is no free space in the found leaf.
	 * We're going to add a new leaf to the tree.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) mb_flags |= EXT4_MB_USE_RESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ppath, newext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) has_space:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) nearex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (!nearex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* there is no extent in this leaf, create first one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ext4_ext_pblock(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ext4_ext_get_actual_len(newext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) nearex = EXT_FIRST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (le32_to_cpu(newext->ee_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug(inode, "insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) ext4_ext_pblock(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) ext4_ext_get_actual_len(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) nearex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) nearex++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) } else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug(inode, "insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) ext4_ext_pblock(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) ext4_ext_get_actual_len(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) nearex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) len = EXT_LAST_EXTENT(eh) - nearex + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) ext_debug(inode, "insert %u:%llu:[%d]%d: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) "move %d extents from 0x%p to 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) le32_to_cpu(newext->ee_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) ext4_ext_pblock(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) ext4_ext_is_unwritten(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) ext4_ext_get_actual_len(newext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) len, nearex, nearex + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) memmove(nearex + 1, nearex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) len * sizeof(struct ext4_extent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) le16_add_cpu(&eh->eh_entries, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) path[depth].p_ext = nearex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) nearex->ee_block = newext->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) nearex->ee_len = newext->ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) /* try to merge extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ext4_ext_try_to_merge(handle, inode, path, nearex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /* time to correct all indexes above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) err = ext4_ext_correct_indexes(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) err = ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ext4_ext_drop_refs(npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) kfree(npath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
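
/*
 * Illustrative sketch only (hypothetical helper): inserting a single
 * written block at @lblk backed by physical block @pblk. The real
 * callers (e.g. ext4_ext_map_blocks()) hold i_data_sem and pass real
 * get-blocks flags; error handling here is trimmed to the essentials.
 */
static int __maybe_unused
ext4_ext_sketch_insert_one(handle_t *handle, struct inode *inode,
			   ext4_lblk_t lblk, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path;
	struct ext4_extent newex;
	int err;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	newex.ee_block = cpu_to_le32(lblk);
	newex.ee_len = cpu_to_le16(1);
	ext4_ext_store_pblock(&newex, pblk);

	/* may reallocate the path via &path, just like the real callers */
	err = ext4_ext_insert_extent(handle, inode, &path, &newex, 0);
	ext4_ext_drop_refs(path);
	kfree(path);
	return err;
}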
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static int ext4_fill_es_cache_info(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) ext4_lblk_t block, ext4_lblk_t num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct fiemap_extent_info *fieinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) ext4_lblk_t next, end = block + num - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct extent_status es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) while (block <= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (!ext4_es_lookup_extent(inode, block, &next, &es))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (ext4_es_is_unwritten(&es))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) flags |= FIEMAP_EXTENT_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (ext4_es_is_delayed(&es))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) flags |= (FIEMAP_EXTENT_DELALLOC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) FIEMAP_EXTENT_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (ext4_es_is_hole(&es))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) flags |= EXT4_FIEMAP_EXTENT_HOLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (next == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) flags |= FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (flags & (FIEMAP_EXTENT_DELALLOC|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) EXT4_FIEMAP_EXTENT_HOLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) es.es_pblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) es.es_pblk = ext4_es_pblock(&es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) err = fiemap_fill_next_extent(fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) (__u64)es.es_lblk << blksize_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) (__u64)es.es_pblk << blksize_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) (__u64)es.es_len << blksize_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (next == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) block = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (err == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * ext4_ext_determine_hole - determine hole around given block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * @inode: inode we lookup in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * @path: path in extent tree to @lblk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * @lblk: pointer to logical block around which we want to determine hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) *
 * Determine the hole length (and start, if easily possible) around the
 * given logical block. We don't try too hard to find the beginning of the
 * hole, but if @path happens to point to the extent before @lblk, we
 * provide it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * The function returns the length of a hole starting at @lblk. We update @lblk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * to the beginning of the hole if we managed to find it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) ext4_lblk_t *lblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) ext4_lblk_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (ex == NULL) {
		/* there is no extent yet, so the gap is [0; EXT_MAX_BLOCKS) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) *lblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) len = EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) } else if (*lblk < le32_to_cpu(ex->ee_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) len = le32_to_cpu(ex->ee_block) - *lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) } else if (*lblk >= le32_to_cpu(ex->ee_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) + ext4_ext_get_actual_len(ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ext4_lblk_t next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) next = ext4_ext_next_allocated_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) BUG_ON(next == *lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) len = next - *lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
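
/*
 * Worked example (for illustration): with extents at [10, 20) and
 * [50, 60), a lookup for lblk = 30 lands after the first extent, so
 * *lblk is moved to 20 and the returned hole length is 50 - 20 = 30.
 * A lookup for lblk = 5 keeps *lblk at 5 and returns 10 - 5 = 5.
 */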
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
/*
 * ext4_ext_put_gap_in_cache:
 * calculate the boundaries of the gap that the requested block fits into
 * and cache this gap
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ext4_lblk_t hole_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct extent_status es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) hole_start + hole_len - 1, &es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (es.es_len) {
		/* Is there a delayed extent containing lblock? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (es.es_lblk <= hole_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) hole_len = min(es.es_lblk - hole_start, hole_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) EXTENT_STATUS_HOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * ext4_ext_rm_idx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * removes index from the index block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) struct ext4_ext_path *path, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) ext4_fsblk_t leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /* free index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) path = path + depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) leaf = ext4_idx_pblock(path->p_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (unlikely(path->p_hdr->eh_entries == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) err = ext4_ext_get_access(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) len *= sizeof(struct ext4_extent_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) memmove(path->p_idx, path->p_idx + 1, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) le16_add_cpu(&path->p_hdr->eh_entries, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) err = ext4_ext_dirty(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) trace_ext4_ext_rm_idx(inode, leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) ext4_free_blocks(handle, inode, NULL, leaf, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) while (--depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) path--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) err = ext4_ext_get_access(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) path->p_idx->ei_block = (path+1)->p_idx->ei_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) err = ext4_ext_dirty(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed, the caller should calculate the
 * credits under i_data_sem.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) int depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
		/* perhaps there is space in the leaf? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (le16_to_cpu(path[depth].p_hdr->eh_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) < le16_to_cpu(path[depth].p_hdr->eh_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
			/*
			 * There is some space in the leaf, so there is no
			 * need to account for the leaf block credit.
			 *
			 * Bitmaps, block group descriptor blocks and other
			 * metadata blocks still need to be accounted for.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* 1 bitmap, 1 block group descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) return ext4_chunk_trans_blocks(inode, nrblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
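
/*
 * Worked example (for illustration): when the path shows free room in
 * the leaf, only the allocation metadata needs journaling, so the cost
 * is 2 (bitmap + group descriptor) + EXT4_META_TRANS_BLOCKS(sb). With
 * no path, or a full leaf, we fall back to ext4_chunk_trans_blocks().
 */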
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) * How many index/leaf blocks need to change/allocate to add @extents extents?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) *
 * If we add a single extent, then in the worst case an index/leaf block
 * at each tree level may need to be changed in case of a tree split.
 *
 * If more extents are inserted, they could cause the whole tree to split
 * more than once, but this is really rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
	/* If we are converting inline data, only one block is needed here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (ext4_has_inline_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (extents <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) index = depth * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) index = depth * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
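
/*
 * Worked example (for illustration): with depth == 2, a single extent
 * budgets 2 blocks per level (the block touched by a split plus the
 * newly allocated one), i.e. 4; for several extents we budget 3 per
 * level, i.e. 6, to allow for the tree splitting more than once.
 */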
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static inline int get_default_free_blocks_flags(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) else if (ext4_should_journal_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return EXT4_FREE_BLOCKS_FORGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
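
/*
 * Illustrative outcomes of the selection above (not in the original
 * source): freeing blocks of a directory, symlink or xattr inode always
 * yields EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; a regular
 * file with journalled data yields EXT4_FREE_BLOCKS_FORGET only; a
 * regular file in the default ordered mode yields 0.
 */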
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * ext4_rereserve_cluster - increment the reserved cluster count when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * freeing a cluster with a pending reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * @inode - file containing the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * @lblk - logical block in cluster to be reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) * Increments the reserved cluster count and adjusts quota in a bigalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) * file system when freeing a partial cluster containing at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * delayed and unwritten block. A partial cluster meeting that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * requirement will have a pending reservation. If so, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) * defer reserved and allocated space accounting to a subsequent call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct ext4_inode_info *ei = EXT4_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) spin_lock(&ei->i_block_reservation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) ei->i_reserved_data_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) spin_unlock(&ei->i_block_reservation_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) percpu_counter_add(&sbi->s_freeclusters_counter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ext4_remove_pending(inode, lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
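
/*
 * Typical calling pattern, sketched from the callers below (illustrative;
 * "pclu" and "lblk" stand in for a partial cluster and its logical block):
 *
 *	flags = get_default_free_blocks_flags(inode);
 *	if (ext4_is_pending(inode, lblk))
 *		flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
 *	ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, pclu),
 *			 sbi->s_cluster_ratio, flags);
 *	if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
 *		ext4_rereserve_cluster(inode, lblk);
 */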
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) struct ext4_extent *ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) struct partial_cluster *partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) ext4_lblk_t from, ext4_lblk_t to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) unsigned short ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) ext4_fsblk_t last_pblk, pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) ext4_lblk_t num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /* only extent tail removal is allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (from < le32_to_cpu(ex->ee_block) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) ext4_error(sbi->s_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) "strange request: removal(2) %u-%u from %u:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) from, to, le32_to_cpu(ex->ee_block), ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) #ifdef EXTENTS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) spin_lock(&sbi->s_ext_stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) sbi->s_ext_blocks += ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) sbi->s_ext_extents++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (ee_len < sbi->s_ext_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) sbi->s_ext_min = ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (ee_len > sbi->s_ext_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) sbi->s_ext_max = ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (ext_depth(inode) > sbi->s_depth_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) sbi->s_depth_max = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) spin_unlock(&sbi->s_ext_stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) trace_ext4_remove_blocks(inode, ex, from, to, partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * if we have a partial cluster, and it's different from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * cluster of the last block in the extent, we free it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (partial->state != initial &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) partial->pclu != EXT4_B2C(sbi, last_pblk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) if (partial->state == tofree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) flags = get_default_free_blocks_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (ext4_is_pending(inode, partial->lblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ext4_free_blocks(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) EXT4_C2B(sbi, partial->pclu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) sbi->s_cluster_ratio, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ext4_rereserve_cluster(inode, partial->lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) partial->state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) num = le32_to_cpu(ex->ee_block) + ee_len - from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) pblk = ext4_ext_pblock(ex) + ee_len - num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * We free the partial cluster at the end of the extent (if any),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * unless the cluster is used by another extent (partial_cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * state is nofree). If a partial cluster exists here, it must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * shared with the last block in the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) flags = get_default_free_blocks_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) /* partial, left end cluster aligned, right end unaligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) (EXT4_LBLK_CMASK(sbi, to) >= from) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) (partial->state != nofree)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (ext4_is_pending(inode, to))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) ext4_free_blocks(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) EXT4_PBLK_CMASK(sbi, last_pblk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) sbi->s_cluster_ratio, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ext4_rereserve_cluster(inode, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) partial->state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) flags = get_default_free_blocks_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * For bigalloc file systems, we never free a partial cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * at the beginning of the extent. Instead, we check to see if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) * need to free it on a subsequent call to ext4_remove_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) /* reset the partial cluster if we've freed past it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) partial->state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * If we've freed the entire extent but the beginning is not left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * cluster aligned and is not marked as ineligible for freeing, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * record the partial cluster at the beginning of the extent. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * wasn't freed by the preceding ext4_free_blocks() call, and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * need to look farther to the left to determine if it's to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) * (not shared with another extent). Else, reset the partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * cluster - we're either done freeing or the beginning of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * extent is left cluster aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (partial->state == initial) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) partial->pclu = EXT4_B2C(sbi, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) partial->lblk = from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) partial->state = tofree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) partial->state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
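
/*
 * Worked example (illustrative, assuming a bigalloc cluster ratio of 4):
 * removing the tail 5..9 of an extent ending at logical block 9 gives
 * to == 9, EXT4_LBLK_COFF(sbi, 9) == 1 (not the last block of its
 * cluster) and EXT4_LBLK_CMASK(sbi, 9) == 8 >= from, so the physical
 * cluster backing blocks 8..11 is freed whole first (provided the
 * partial-cluster state is not nofree); the remaining blocks are then
 * freed with NOFREE_FIRST/LAST_CLUSTER set so that no partial cluster
 * is freed twice or prematurely.
 */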
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * ext4_ext_rm_leaf() removes the extents associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * blocks appearing between "start" and "end". Both "start"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * and "end" must appear in the same extent or EFSCORRUPTED is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * @handle: The journal handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) * @inode: The file's inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) * @path: The path to the leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) * @partial: The cluster which we'll have to free if all extents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * have been released from it. However, if its state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) * nofree, it's a cluster just to the right of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) * punched region and it must not be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * @start: The first block to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * @end: The last block to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) struct ext4_ext_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct partial_cluster *partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) ext4_lblk_t start, ext4_lblk_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int err = 0, correct_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) int depth = ext_depth(inode), credits, revoke_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) ext4_lblk_t a, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) unsigned num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) ext4_lblk_t ex_ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) unsigned short ex_ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) unsigned unwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) ext4_fsblk_t pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) /* the header must be checked already in ext4_ext_remove_space() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (!path[depth].p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (unlikely(path[depth].p_hdr == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) /* find where to start removing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (!ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) ex = EXT_LAST_EXTENT(eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) ex_ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) ex_ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) trace_ext4_ext_rm_leaf(inode, start, ex, partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) while (ex >= EXT_FIRST_EXTENT(eh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) ex_ee_block + ex_ee_len > start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (ext4_ext_is_unwritten(ex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) unwritten = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) unwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) unwritten, ex_ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) path[depth].p_ext = ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) a = ex_ee_block > start ? ex_ee_block : start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) b = ex_ee_block+ex_ee_len - 1 < end ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) ex_ee_block+ex_ee_len - 1 : end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) ext_debug(inode, " border %u:%u\n", a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) /* If this extent is beyond the end of the hole, skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (end < ex_ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) * We're going to skip this extent and move to another,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) * so note that its first cluster is in use to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * freeing it when removing blocks. Eventually, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * right edge of the truncated/punched region will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) * be just to the left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (sbi->s_cluster_ratio > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) pblk = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) partial->pclu = EXT4_B2C(sbi, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) partial->state = nofree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) ex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) ex_ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ex_ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) } else if (b != ex_ee_block + ex_ee_len - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) "can not handle truncate %u:%u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) "on extent %u:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) start, end, ex_ee_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) ex_ee_block + ex_ee_len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) } else if (a != ex_ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /* remove tail of the extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) num = a - ex_ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) /* remove whole extent: excellent! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * 3 for leaf, sb, and inode plus 2 (bmap and group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * descriptor) for each block group; assume two block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) * groups plus ex_ee_len/blocks_per_block_group for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) * the worst case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) */
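/*
 * Worked numbers (illustrative): with 32768 blocks per group,
 * removing a 4000-block extent gives credits = 7 + 2 * (4000 /
 * 32768) = 7 here, before the index, quota and revoke credits
 * are added below.
 */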
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (ex == EXT_FIRST_EXTENT(eh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) correct_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) credits += (ext_depth(inode)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * We may end up freeing some index blocks and data from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * punched range. Note that partial clusters are accounted for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * by ext4_free_data_revoke_credits().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) revoke_credits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ext4_free_metadata_revoke_credits(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) ext_depth(inode)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ext4_free_data_revoke_credits(inode, b - a + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) err = ext4_datasem_ensure_credits(handle, inode, credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) credits, revoke_credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) /* this extent is removed; mark slot entirely unused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) ext4_ext_store_pblock(ex, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) ex->ee_len = cpu_to_le16(num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * Do not mark unwritten if all the blocks in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * extent have been removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (unwritten && num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) * If the extent was completely released,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * we need to remove it from the leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (num == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (end != EXT_MAX_BLOCKS - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) * For hole punching, we need to scoot all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * extents up when an extent is removed so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) * we don't have blank extents in the middle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) sizeof(struct ext4_extent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) /* Now get rid of the one at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) memset(EXT_LAST_EXTENT(eh), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) sizeof(struct ext4_extent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) le16_add_cpu(&eh->eh_entries, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) ext4_ext_pblock(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) ex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) ex_ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) ex_ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (correct_index && eh->eh_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) err = ext4_ext_correct_indexes(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * If there's a partial cluster and at least one extent remains in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) * the leaf, free the partial cluster if it isn't shared with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) * current extent. If it is shared with the current extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * we reset the partial cluster because we've reached the start of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * truncated/punched region and we're done removing blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (partial->pclu != EXT4_B2C(sbi, pblk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) int flags = get_default_free_blocks_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (ext4_is_pending(inode, partial->lblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) ext4_free_blocks(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) EXT4_C2B(sbi, partial->pclu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) sbi->s_cluster_ratio, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) ext4_rereserve_cluster(inode, partial->lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) partial->state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /* if this leaf is free, then we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * remove it from the index block above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) err = ext4_ext_rm_idx(handle, inode, path, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) * ext4_ext_more_to_rm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) * returns 1 if current index has to be freed (even partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) ext4_ext_more_to_rm(struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) BUG_ON(path->p_idx == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * if a truncate on a deeper level happened, it wasn't partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * so we have to consider the current index for truncation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
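
/*
 * Example (illustrative): path[i].p_block snapshots eh_entries before we
 * descend (and is set to eh_entries + 1 when a level is first entered,
 * see ext4_ext_remove_space() below). If an index block held 5 entries
 * on the way down and still reports 5 on the way back up, nothing under
 * it was removed whole and we return 0; if it dropped to 4, we return 1
 * and keep scanning this level.
 */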
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ext4_lblk_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) int depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) struct ext4_ext_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct partial_cluster partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) int i = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) partial.pclu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) partial.lblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) partial.state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ext_debug(inode, "truncate since %u to %u\n", start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) /* probably the first extent we're going to free will be the last in the block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) depth + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) ext4_free_metadata_revoke_credits(inode->i_sb, depth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) trace_ext4_ext_remove_space(inode, start, end, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * Check if we are removing extents from inside the extent tree. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) * so, we are punching a hole inside the extent tree, and we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * check whether the extent covering the last block to remove needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * to be split so that we can easily remove that part of it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) * ext4_ext_rm_leaf().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (end < EXT_MAX_BLOCKS - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) ext4_lblk_t ee_block, ex_end, lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) ext4_fsblk_t pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /* find the extent for, or the closest extent to, this block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) path = ext4_find_extent(inode, end, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) if (IS_ERR(path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) /* The leaf may be missing only if the inode has no blocks at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) "path[%d].p_hdr == NULL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * See if the last block is inside the extent, if so split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) * the extent at 'end' block so we can easily remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) * tail of the first part of the split extent in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) * ext4_ext_rm_leaf().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (end >= ee_block && end < ex_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) * If we're going to split the extent, note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) * the cluster containing the block after 'end' is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * in use to avoid freeing it when removing blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (sbi->s_cluster_ratio > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) partial.pclu = EXT4_B2C(sbi, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) partial.state = nofree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * Split the extent in two so that 'end' is the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) * block in the first new extent. Also, we should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) * fail to remove space due to ENOSPC, so try to use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) * reserved block if that happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) err = ext4_force_split_extent_at(handle, inode, &path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) end + 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) partial.state == initial) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) * If we're punching, there's an extent to the right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) * If the partial cluster hasn't been set, set it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) * that extent's first cluster and its state to nofree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * so it won't be freed should it contain blocks to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) * removed. If it's already set (tofree/nofree), we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) * retrying and keep the original partial cluster info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * so a cluster marked tofree as a result of earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) * extent removal is not lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) lblk = ex_end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) err = ext4_ext_search_right(inode, path, &lblk, &pblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (pblk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) partial.pclu = EXT4_B2C(sbi, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) partial.state = nofree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * We start scanning from the right side, freeing all the blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * after i_size and walking into the tree depth-wise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) int k = i = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) while (--k > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) path[k].p_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) le16_to_cpu(path[k].p_hdr->eh_entries)+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (path == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) path[0].p_maxdepth = path[0].p_depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) path[0].p_hdr = ext_inode_hdr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) while (i >= 0 && err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (i == depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) /* this is a leaf block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) err = ext4_ext_rm_leaf(handle, inode, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) &partial, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) /* root level has p_bh == NULL, brelse() eats this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) brelse(path[i].p_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) path[i].p_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) /* this is an index block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) if (!path[i].p_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ext_debug(inode, "initialize header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) path[i].p_hdr = ext_block_hdr(path[i].p_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) if (!path[i].p_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) /* this level hasn't been touched yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) path[i].p_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) le16_to_cpu(path[i].p_hdr->eh_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) /* we were already here, so look at the next index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) path[i].p_idx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) i, EXT_FIRST_INDEX(path[i].p_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) path[i].p_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (ext4_ext_more_to_rm(path + i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) /* go to the next level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ext_debug(inode, "move to level %d (block %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) i + 1, ext4_idx_pblock(path[i].p_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) memset(path + i + 1, 0, sizeof(*path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) bh = read_extent_tree_block(inode, path[i].p_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) depth - i - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (IS_ERR(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) /* should we reset i_size? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) err = PTR_ERR(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) /* Yield here to deal with large extent trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) * Should be a no-op if we did IO above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (WARN_ON(i + 1 > depth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) path[i + 1].p_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* save actual number of indexes since this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) * number is changed at the next iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* we finished processing this index, go up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (path[i].p_hdr->eh_entries == 0 && i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) /* index is empty, remove it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) * the handle must already be prepared by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) * the leaf removal (ext4_ext_rm_leaf()) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) err = ext4_ext_rm_idx(handle, inode, path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) /* root level has p_bh == NULL, brelse() eats this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) brelse(path[i].p_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) path[i].p_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) ext_debug(inode, "return to level %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) path->p_hdr->eh_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) * If there's a partial cluster and we have removed the first extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) * in the file, then we also free that partial cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (partial.state == tofree && err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) int flags = get_default_free_blocks_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (ext4_is_pending(inode, partial.lblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) ext4_free_blocks(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) EXT4_C2B(sbi, partial.pclu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) sbi->s_cluster_ratio, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) ext4_rereserve_cluster(inode, partial.lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) partial.state = initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) /* TODO: flexible tree reduction should be here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) if (path->p_hdr->eh_entries == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * truncate to zero freed the whole tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) * so we need to correct eh_depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) err = ext4_ext_get_access(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) ext_inode_hdr(inode)->eh_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) ext_inode_hdr(inode)->eh_max =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) cpu_to_le16(ext4_ext_space_root(inode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) err = ext4_ext_dirty(handle, inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
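
/*
 * Usage sketch (illustrative, not part of the original source): the
 * truncate path removes everything from the new last block onwards,
 * roughly
 *
 *	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 *
 * while hole punching passes the first and last blocks of the hole,
 * which triggers the extent split handled at the top of the function.
 */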
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) * called at mount time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) void ext4_ext_init(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) * possible initialization would be here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) if (ext4_has_feature_extents(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) printk(KERN_INFO "EXT4-fs: file extents enabled"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) #ifdef AGGRESSIVE_TEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) ", aggressive tests"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) #ifdef CHECK_BINSEARCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) ", check binsearch"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) #ifdef EXTENTS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) ", stats"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) #ifdef EXTENTS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) EXT4_SB(sb)->s_ext_min = 1 << 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) EXT4_SB(sb)->s_ext_max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) * called at umount time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) void ext4_ext_release(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) if (!ext4_has_feature_extents(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) #ifdef EXTENTS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) sbi->s_ext_blocks, sbi->s_ext_extents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) sbi->s_ext_blocks / sbi->s_ext_extents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) ext4_fsblk_t ee_pblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) unsigned int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) ee_pblock = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (ee_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) EXTENT_STATUS_WRITTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) /* FIXME!! we need to try to merge to left or right after zero-out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) ext4_fsblk_t ee_pblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) unsigned int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) ee_pblock = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) }
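
/*
 * Illustrative note (not in the original source): callers that set
 * EXT4_EXT_MAY_ZEROOUT fall back to ext4_ext_zeroout() when a split
 * fails with ENOSPC, writing zeroes over the extent so it can be
 * marked initialized without being split.
 */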
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) * ext4_split_extent_at() splits an extent at a given block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) * @handle: the journal handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) * @inode: the file inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) * @path: the path to the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) * @split: the logical block where the extent is split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) * @split_flag: indicates whether the extent may be zeroed out if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) * split fails, and the states (initialized or unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) * of the new extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) * @flags: flags used to insert the new extent into the extent tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) * Splits extent [a, b] into two extents [a, @split) and [@split, b],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) * whose states are determined by @split_flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) * There are two cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) * a> the extent is split into two extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) * b> no split is needed, and the extent is just marked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) static int ext4_split_extent_at(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) ext4_lblk_t split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) int split_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) ext4_fsblk_t newblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) struct ext4_extent *ex, newex, orig_ex, zero_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) struct ext4_extent *ex2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) unsigned int ee_len, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) newblock = split - ee_block + ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) BUG_ON(split < ee_block || split >= (ee_block + ee_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) BUG_ON(!ext4_ext_is_unwritten(ex) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) split_flag & (EXT4_EXT_MAY_ZEROOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) EXT4_EXT_MARK_UNWRIT1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) EXT4_EXT_MARK_UNWRIT2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) if (split == ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) * case b: block @split is the first block of the extent, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) * just change the state of the extent and no splitting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) * is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (split_flag & EXT4_EXT_MARK_UNWRIT2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) ext4_ext_mark_initialized(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) ext4_ext_try_to_merge(handle, inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) err = ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) /* case a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) memcpy(&orig_ex, ex, sizeof(orig_ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) ex->ee_len = cpu_to_le16(split - ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) if (split_flag & EXT4_EXT_MARK_UNWRIT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) * The path may point to a new leaf, rather than the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) * one, after ext4_ext_insert_extent() returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) goto fix_extent_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) ex2 = &newex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) ex2->ee_block = cpu_to_le32(split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) ext4_ext_store_pblock(ex2, newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (split_flag & EXT4_EXT_MARK_UNWRIT2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) ext4_ext_mark_unwritten(ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) if (err != -ENOSPC && err != -EDQUOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
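	/*
	 * Insertion failed with ENOSPC or EDQUOT. If the caller allows it,
	 * fall back to zeroing out instead of splitting: zero out the half
	 * that does not carry valid data (with EXT4_EXT_DATA_VALID1 set the
	 * first half is valid, so the second half is zeroed, and vice
	 * versa), or the whole original extent if neither flag is set, and
	 * then restore the original extent length.
	 */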
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) if (split_flag & EXT4_EXT_DATA_VALID1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) err = ext4_ext_zeroout(inode, ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) zero_ex.ee_block = ex2->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) zero_ex.ee_len = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) ext4_ext_get_actual_len(ex2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) ext4_ext_store_pblock(&zero_ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) ext4_ext_pblock(ex2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) err = ext4_ext_zeroout(inode, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) zero_ex.ee_block = ex->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) zero_ex.ee_len = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) ext4_ext_get_actual_len(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) ext4_ext_store_pblock(&zero_ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) ext4_ext_pblock(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) err = ext4_ext_zeroout(inode, &orig_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) zero_ex.ee_block = orig_ex.ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) zero_ex.ee_len = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) ext4_ext_get_actual_len(&orig_ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) ext4_ext_store_pblock(&zero_ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) ext4_ext_pblock(&orig_ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) /* update the extent length and mark as initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) ex->ee_len = cpu_to_le16(ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) ext4_ext_try_to_merge(handle, inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) err = ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) /* update extent status tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) err = ext4_zeroout_es(inode, &zero_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /* If we failed at this point, we don't know what state the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) * extent tree is in exactly, so don't try to fix up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) * length of the original extent as that may do even more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) * damage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) fix_extent_len:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) ex->ee_len = orig_ex.ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) * Ignore ext4_ext_dirty return value since we are already in error path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) * and err is a non-zero error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) * ext4_split_extent() splits an extent and marks the extent covered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) * by @map as @split_flag indicates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) * It may result in splitting the extent into multiple extents (up to three).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) * There are three possibilities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * a> There is no split required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * b> Splits into two extents: the split happens at either end of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) * c> Splits into three extents: someone is splitting in the middle of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) */
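/*
 * For illustration (hypothetical numbers): mapping [104, 108) inside
 * extent [100, 110) takes two calls to ext4_split_extent_at(): first
 * at block 108, producing [100, 108) and [108, 110), and then at
 * block 104, producing the final [100, 104), [104, 108) and
 * [108, 110). The path is looked up again between the two splits
 * because the first one may have changed the leaf.
 */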
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) static int ext4_split_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) int split_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) unsigned int ee_len, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) int unwritten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) int split_flag1, flags1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) int allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) unwritten = ext4_ext_is_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) if (map->m_lblk + map->m_len < ee_block + ee_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) EXT4_EXT_MARK_UNWRIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (split_flag & EXT4_EXT_DATA_VALID2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) split_flag1 |= EXT4_EXT_DATA_VALID1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) err = ext4_split_extent_at(handle, inode, ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) map->m_lblk + map->m_len, split_flag1, flags1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) allocated = ee_len - (map->m_lblk - ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) * Updating the path is required because the previous ext4_split_extent_at()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) * may have resulted in a split of the original leaf or an extent zeroout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) (unsigned long) map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) unwritten = ext4_ext_is_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) split_flag1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) if (map->m_lblk >= ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) if (unwritten) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) EXT4_EXT_MARK_UNWRIT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) err = ext4_split_extent_at(handle, inode, ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) map->m_lblk, split_flag1, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) return err ? err : allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) * This function is called by ext4_ext_map_blocks() if someone tries to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) * to an unwritten extent. It may result in splitting the unwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) * extent into multiple extents (up to three - one initialized and two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) * unwritten).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) * There are three possibilities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) * a> There is no split required: the entire extent should be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) * b> Splits into two extents: the write happens at either end of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) * c> Splits into three extents: someone is writing in the middle of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) * Pre-conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) * - The extent pointed to by 'path' is unwritten.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) * - The extent pointed to by 'path' contains a superset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) * Post-conditions on success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) * - the returned value is the number of blocks beyond map->m_lblk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) * that are allocated and initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) * It is guaranteed to be >= map->m_len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) */
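/*
 * For illustration (hypothetical numbers): a write mapping [102, 106)
 * into an unwritten extent [100, 110) returns at least 4, and may
 * return up to 8 (blocks 102..109) when the tail of the extent ends
 * up initialized as well.
 */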
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) static int ext4_ext_convert_to_initialized(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct ext4_sb_info *sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) struct ext4_extent_header *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) struct ext4_map_blocks split_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) struct ext4_extent zero_ex1, zero_ex2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) struct ext4_extent *ex, *abut_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) ext4_lblk_t ee_block, eof_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) unsigned int ee_len, depth, map_len = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) int allocated = 0, max_zeroout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) int split_flag = EXT4_EXT_DATA_VALID2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) ext_debug(inode, "logical block %llu, max_blocks %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) (unsigned long long)map->m_lblk, map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) sbi = EXT4_SB(inode->i_sb);
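	/*
	 * eof_block is the first logical block beyond the on-disk file
	 * size, rounded up to a block boundary; for an extending write
	 * it is pushed out to the end of the requested range.
	 */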
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) >> inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) if (eof_block < map->m_lblk + map_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) eof_block = map->m_lblk + map_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) eh = path[depth].p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) zero_ex1.ee_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) zero_ex2.ee_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) /* Pre-conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) BUG_ON(!ext4_ext_is_unwritten(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) * Attempt to transfer newly initialized blocks from the currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) * unwritten extent to its neighbor. This is much cheaper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) * than an insertion followed by a merge as those involve costly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) * memmove() calls. Transferring to the left is the common case in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) * followed by append writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) * Limitations of the current logic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) * - L1: we do not deal with writes covering the whole extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) * This would require removing the extent if the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) * is possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) * - L2: we only attempt to merge with an extent stored in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) * same extent tree node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) */
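/*
 * For illustration (hypothetical numbers): if abut_ex is an
 * initialized extent [90, 100) at pblk 4990 and ex is an unwritten
 * extent [100, 110) at pblk 5000, a write of map_len == 4 blocks at
 * block 100 satisfies C1-C4, so abut_ex grows to [90, 104) and ex
 * shrinks to the still-unwritten [104, 110) at pblk 5004; no extent
 * is inserted and no memmove() is needed.
 */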
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if ((map->m_lblk == ee_block) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) /* See if we can merge left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) (map_len < ee_len) && /*L1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) ext4_lblk_t prev_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) ext4_fsblk_t prev_pblk, ee_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) unsigned int prev_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) abut_ex = ex - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) prev_lblk = le32_to_cpu(abut_ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) prev_len = ext4_ext_get_actual_len(abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) prev_pblk = ext4_ext_pblock(abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) ee_pblk = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) * A transfer of blocks from 'ex' to 'abut_ex' is allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) * upon those conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) * - C1: abut_ex is initialized,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) * - C2: abut_ex is logically abutting ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) * - C3: abut_ex is physically abutting ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) * - C4: abut_ex can receive the additional blocks without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) * overflowing the (initialized) length limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) ((prev_lblk + prev_len) == ee_block) && /*C2*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) trace_ext4_ext_convert_to_initialized_fastpath(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) map, ex, abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) /* Shift the start of ex by 'map_len' blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) ex->ee_block = cpu_to_le32(ee_block + map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) ext4_ext_store_pblock(ex, ee_pblk + map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) ex->ee_len = cpu_to_le16(ee_len - map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) ext4_ext_mark_unwritten(ex); /* Restore the flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* Extend abut_ex by 'map_len' blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) /* Result: number of initialized blocks past m_lblk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) allocated = map_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) (map_len < ee_len) && /*L1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) ex < EXT_LAST_EXTENT(eh)) { /*L2*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) /* See if we can merge right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) ext4_lblk_t next_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) ext4_fsblk_t next_pblk, ee_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) unsigned int next_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) abut_ex = ex + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) next_lblk = le32_to_cpu(abut_ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) next_len = ext4_ext_get_actual_len(abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) next_pblk = ext4_ext_pblock(abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) ee_pblk = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * A transfer of blocks from 'ex' to 'abut_ex' is allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) * upon those conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) * - C1: abut_ex is initialized,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) * - C2: abut_ex is logically abutting ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) * - C3: abut_ex is physically abutting ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) * - C4: abut_ex can receive the additional blocks without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) * overflowing the (initialized) length limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) ((map->m_lblk + map_len) == next_lblk) && /*C2*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) ((ee_pblk + ee_len) == next_pblk) && /*C3*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) trace_ext4_ext_convert_to_initialized_fastpath(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) map, ex, abut_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) /* Shift the start of abut_ex by 'map_len' blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) ex->ee_len = cpu_to_le16(ee_len - map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) ext4_ext_mark_unwritten(ex); /* Restore the flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) /* Extend abut_ex by 'map_len' blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) abut_ex->ee_len = cpu_to_le16(next_len + map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) /* Result: number of initialized blocks past m_lblk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) allocated = map_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) if (allocated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) /* Mark the block containing both extents as dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) /* Update path to point to the right extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) path[depth].p_ext = abut_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) allocated = ee_len - (map->m_lblk - ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) WARN_ON(map->m_lblk < ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) * It is safe to convert extent to initialized via explicit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) * zeroout only if extent is fully inside i_size or new_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (EXT4_EXT_MAY_ZEROOUT & split_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) max_zeroout = sbi->s_extent_max_zeroout_kb >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) (inode->i_sb->s_blocksize_bits - 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) * five cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) * 1. split the extent into three extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) * 2. split the extent into two extents, zeroout the head of the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) * extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) * 3. split the extent into two extents, zeroout the tail of the second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) * extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) * 4. split the extent into two extents without zeroout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) * 5. no splitting needed, just possibly zeroout the head and/or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) * tail of the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) */
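/*
 * For illustration (hypothetical numbers): with ex = [100, 110),
 * map = [104, 108) and hence allocated == 6, case 3 (assuming
 * allocated <= max_zeroout) zeroes out [108, 110) and widens
 * split_map to [104, 110); if the head also fits under max_zeroout,
 * case 2 zeroes out [100, 104) and widens split_map to the whole
 * extent, so no split is needed at all (case 5).
 */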
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) split_map.m_lblk = map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) split_map.m_len = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if (max_zeroout && (allocated > split_map.m_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) if (allocated <= max_zeroout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) /* case 3 or 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) zero_ex1.ee_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) cpu_to_le32(split_map.m_lblk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) split_map.m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) zero_ex1.ee_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) cpu_to_le16(allocated - split_map.m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) ext4_ext_store_pblock(&zero_ex1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) ext4_ext_pblock(ex) + split_map.m_lblk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) split_map.m_len - ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) err = ext4_ext_zeroout(inode, &zero_ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) split_map.m_len = allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (split_map.m_lblk - ee_block + split_map.m_len <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) max_zeroout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) /* case 2 or 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) if (split_map.m_lblk != ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) zero_ex2.ee_block = ex->ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) ext4_ext_store_pblock(&zero_ex2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) ext4_ext_pblock(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) err = ext4_ext_zeroout(inode, &zero_ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) split_map.m_len += split_map.m_lblk - ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) split_map.m_lblk = ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) /* If we have gotten a failure, don't zero out status tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) err = ext4_zeroout_es(inode, &zero_ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) err = ext4_zeroout_es(inode, &zero_ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) return err ? err : allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) * This function is called by ext4_ext_map_blocks() from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) * ext4_get_blocks_dio_write() when DIO is used to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) * to an unwritten extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) * Writing to an unwritten extent may result in splitting the unwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) * extent into multiple initialized/unwritten extents (up to three).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) * There are three possibilities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) * a> There is no split required: the entire extent should be unwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) * b> Splits into two extents: the write happens at either end of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * c> Splits into three extents: someone is writing in the middle of the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) * This works the same way in the case of initialized -> unwritten conversion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) * One or more index blocks may be needed if the extent tree grows after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) * the unwritten extent is split. To prevent ENOSPC from occurring at I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) * completion time, we need to split the unwritten extent before the DIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) * is submitted. The unwritten extent in question will be split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) * into at most three unwritten extents. After the I/O completes, the part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) * that was filled will be converted to initialized by the end_io callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) * via ext4_convert_unwritten_extents().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) * Returns the size of the unwritten extent to be written on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) */
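/*
 * Note the asymmetry below: conversion to unwritten sets
 * EXT4_EXT_DATA_VALID1, while conversion to initialized sets
 * EXT4_EXT_DATA_VALID2 together with EXT4_EXT_MARK_UNWRIT2, and
 * additionally allows zeroout (EXT4_EXT_MAY_ZEROOUT) only when the
 * extent lies fully inside i_size or the new size.
 */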
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) static int ext4_split_convert_extents(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) ext4_lblk_t eof_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) unsigned int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) int split_flag = 0, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) ext_debug(inode, "logical block %llu, max_blocks %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) (unsigned long long)map->m_lblk, map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) >> inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) if (eof_block < map->m_lblk + map->m_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) eof_block = map->m_lblk + map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) * It is safe to convert extent to initialized via explicit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) * zeroout only if extent is fully inside i_size or new_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) /* Convert to unwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) split_flag |= EXT4_EXT_DATA_VALID1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) /* Convert to initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) split_flag |= ee_block + ee_len <= eof_block ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) EXT4_EXT_MAY_ZEROOUT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) flags |= EXT4_GET_BLOCKS_PRE_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) static int ext4_convert_unwritten_extents_endio(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) struct ext4_ext_path **ppath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) unsigned int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) ext_debug(inode, "logical block %llu, max_blocks %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) (unsigned long long)ee_block, ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) /* If the extent is larger than requested, it is a clear sign that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) * still have some extent state machine issues left, so an extent split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) * is still required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) * TODO: Once all related issues are fixed, this situation should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) * illegal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) if (ee_block != map->m_lblk || ee_len > map->m_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) #ifdef CONFIG_EXT4_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) " len %u; IO logical block %llu, len %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) inode->i_ino, (unsigned long long)ee_block, ee_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) (unsigned long long)map->m_lblk, map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) err = ext4_split_convert_extents(handle, inode, map, ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) EXT4_GET_BLOCKS_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) /* first mark the extent as initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) ext4_ext_mark_initialized(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) /* note: ext4_ext_correct_indexes() isn't needed here because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) * borders are not changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) ext4_ext_try_to_merge(handle, inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) /* Mark modified extent as dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) err = ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) convert_initialized_extent(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) struct ext4_ext_path **ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) unsigned int *allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) struct ext4_ext_path *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) ext4_lblk_t ee_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) unsigned int ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) * Make sure that the extent is no bigger than what we support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) * an unwritten extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) ext_debug(inode, "logical block %llu, max_blocks %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) (unsigned long long)ee_block, ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) if (ee_block != map->m_lblk || ee_len > map->m_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) err = ext4_split_convert_extents(handle, inode, map, ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) (unsigned long) map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) /* first mark the extent as unwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) /* note: ext4_ext_correct_indexes() isn't needed here because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * borders are not changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) ext4_ext_try_to_merge(handle, inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) /* Mark modified extent as dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) err = ext4_ext_dirty(handle, inode, path + path->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) map->m_flags |= EXT4_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) if (*allocated > map->m_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) *allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) map->m_len = *allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
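/*
 * ext4_ext_handle_unwritten_extents() dispatches on @flags:
 * EXT4_GET_BLOCKS_PRE_IO splits the extent before the I/O is
 * submitted; EXT4_GET_BLOCKS_CONVERT converts the filled extent to
 * written at end_io time; EXT4_GET_BLOCKS_UNWRIT_EXT returns the
 * already-existing unwritten extent for a repeated fallocate request;
 * a plain lookup (without EXT4_GET_BLOCKS_CREATE) reports the
 * unwritten range without mapping it; otherwise the extent is
 * converted to initialized for a buffered write.
 */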
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) struct ext4_ext_path **ppath, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) unsigned int allocated, ext4_fsblk_t newblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) struct ext4_ext_path __maybe_unused *path = *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) (unsigned long long)map->m_lblk, map->m_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) * When writing into unwritten space, we should not fail to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) * allocate metadata blocks for the new extent block if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) allocated, newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) /* get_block() before submitting IO, split the extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) if (flags & EXT4_GET_BLOCKS_PRE_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) ret = ext4_split_convert_extents(handle, inode, map, ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) flags | EXT4_GET_BLOCKS_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) * We shouldn't get a 0 return when splitting an extent unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) * m_len is 0 (bug) or the extent has been corrupted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (unlikely(ret == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) "unexpected ret == 0, m_len = %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) map->m_flags |= EXT4_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) /* IO end_io complete, convert the filled extent to written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) if (flags & EXT4_GET_BLOCKS_CONVERT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) err = ext4_convert_unwritten_extents_endio(handle, inode, map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) ppath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) goto map_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /* buffered IO cases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) * This is a repeated fallocate creation request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) * we already have an unwritten extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) map->m_flags |= EXT4_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) goto map_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) /* buffered READ or buffered write_begin() lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) * We have blocks reserved already. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) * return allocated blocks so that delalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) * won't do block reservation for us. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) * the buffer head will be unmapped so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) * a read from the block returns 0s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) map->m_flags |= EXT4_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) * For buffered writes, at writepage time, etc. Convert a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) * discovered unwritten extent to written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) * we shouldn't get a 0 return when converting an unwritten extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) * unless m_len is 0 (a bug) or the extent has been corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) if (unlikely(ret == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939)
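/*
 * A note on the exit ladder below (describing the code as written):
 * "out" is reached only on a fresh allocation and adds EXT4_MAP_NEW;
 * "map_out" marks the range mapped; "out1" trims the returned length
 * to the request; and "out2" returns either the error or the number
 * of blocks mapped.
 */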
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) allocated = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) map->m_flags |= EXT4_MAP_NEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) map_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) map->m_flags |= EXT4_MAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) map->m_pblk = newblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) if (allocated > map->m_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) map->m_len = allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) return err ? err : allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) * get_implied_cluster_alloc - check to see if the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) * allocation (in the map structure) overlaps with a cluster already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) * allocated in an extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) * @sb The filesystem superblock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) * @map The requested lblk->pblk mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) * @ex The extent which might contain an implied cluster allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) * @path The path to the extent tree leaf containing @ex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) * This function is called by ext4_ext_map_blocks() after we failed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) * find blocks that were already in the inode's extent tree. Hence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) * we know that the beginning of the requested region cannot overlap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) * the extent from the inode's extent tree. There are three cases we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) * want to catch. The first is this case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) * |--- cluster # N--|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) * |--- extent ---| |---- requested region ---|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) * |==========|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) * The second case that we need to test for is this one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) * |--------- cluster # N ----------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) * |--- requested region --| |------- extent ----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) * |=======================|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) * The third case is when the requested region lies between two extents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) * within the same cluster:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) * |------------- cluster # N-------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) * |----- ex -----| |---- ex_right ----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) * |------ requested region ------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) * |================|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) * In each of the above cases, we need to set map->m_pblk and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) * map->m_len so that they correspond to the extent labelled "|====|"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) * in cluster #N, since that region is already in use for data in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) * as a new "allocated" block region. Otherwise, we will return 0 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) * ext4_ext_map_blocks() will then allocate one or more new clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) * by calling ext4_mb_new_blocks().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) static int get_implied_cluster_alloc(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) struct ext4_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) struct ext4_extent *ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) struct ext4_ext_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) ext4_lblk_t ex_cluster_start, ex_cluster_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) ext4_lblk_t rr_cluster_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) unsigned short ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) /* The extent passed in that we are trying to match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) ex_cluster_start = EXT4_B2C(sbi, ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) /* The requested region passed into ext4_map_blocks() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) if ((rr_cluster_start == ex_cluster_end) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) (rr_cluster_start == ex_cluster_start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) if (rr_cluster_start == ex_cluster_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) ee_start += ee_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) map->m_len = min(map->m_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) (unsigned) sbi->s_cluster_ratio - c_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) * Check for and handle this case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) * |--------- cluster # N-------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) * |------- extent ----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * |--- requested region ---|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * |===========|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (map->m_lblk < ee_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) map->m_len = min(map->m_len, ee_block - map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * Check for the case where there is already another allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) * block to the right of 'ex' but before the end of the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) * |------------- cluster # N-------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) * |----- ex -----| |---- ex_right ----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) * |------ requested region ------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) * |================|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) if (map->m_lblk > ee_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) ext4_lblk_t next = ext4_ext_next_allocated_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) map->m_len = min(map->m_len, next - map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) }
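
/*
 * A worked example of the first case above, with illustrative numbers
 * only (a sketch assuming a cluster ratio of 16, i.e. s_cluster_bits
 * is 4): let ex cover lblk 100..107 starting at pblk 996, and let the
 * request be m_lblk = 110, m_len = 20. Then c_offset = 110 & 15 = 14,
 * and both lblk 110 and the extent's last block (107) fall in logical
 * cluster 6, whose physical cluster is pblk 992..1007. Hence
 * m_pblk = EXT4_PBLK_CMASK(sbi, 996 + 7) + 14 = 992 + 14 = 1006 and
 * m_len = min(20, 16 - 14) = 2, i.e. lblk 110..111 map to pblk
 * 1006..1007, and the function returns 1.
 */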
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) * Block allocation/map/preallocation routine for extents based files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) * Needs to be called with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) * (i.e., create is zero); otherwise with down_write(&EXT4_I(inode)->i_data_sem).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) * return > 0: number of blocks already mapped or allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) *             if create == 0 and these are pre-allocated blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) *             the buffer head is left unmapped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) *             otherwise the blocks are mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) * return = 0: plain lookup failed (blocks have not been allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) *             the buffer head is left unmapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) * return < 0: error case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) */
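/*
 * A minimal usage sketch of the lookup convention above (illustrative
 * only, not a real caller; the NULL handle is acceptable here only
 * because create is zero, so no block is ever allocated):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ext_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *	if (ret > 0)
 *		... blocks map.m_pblk .. map.m_pblk + ret - 1 are mapped ...
 *	else if (ret == 0)
 *		... hole at lblk, at least map.m_len blocks long ...
 */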
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) struct ext4_map_blocks *map, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) struct ext4_ext_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) struct ext4_extent newex, *ex, ex2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) ext4_fsblk_t newblock = 0, pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) int err = 0, depth, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) unsigned int allocated = 0, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) unsigned int allocated_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) struct ext4_allocation_request ar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) ext4_lblk_t cluster_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) /* find extent for this block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) if (IS_ERR(path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) err = PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) * A consistent leaf must not be empty; this situation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) * possible, though, _during_ tree modification, and that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) * why the assertion can't be placed in ext4_find_extent().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) EXT4_ERROR_INODE(inode, "bad extent address "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) "lblock: %lu, depth: %d pblock %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) (unsigned long) map->m_lblk, depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) path[depth].p_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) ex = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) if (ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) unsigned short ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) * unwritten extents are treated as holes, except that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * we split out initialized portions during a write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) ee_len = ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) /* if found extent covers block, simply return it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) if (in_range(map->m_lblk, ee_block, ee_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) newblock = map->m_lblk - ee_block + ee_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) /* number of remaining blocks in the extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) allocated = ee_len - (map->m_lblk - ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) ext_debug(inode, "%u fit into %u:%d -> %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) map->m_lblk, ee_block, ee_len, newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) * If the extent is initialized check whether the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * caller wants to convert it to unwritten.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) if ((!ext4_ext_is_unwritten(ex)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) err = convert_initialized_extent(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) inode, map, &path, &allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) } else if (!ext4_ext_is_unwritten(ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) map->m_flags |= EXT4_MAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) map->m_pblk = newblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) if (allocated > map->m_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) map->m_len = allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) ret = ext4_ext_handle_unwritten_extents(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) handle, inode, map, &path, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) allocated, newblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) allocated = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) * The requested block isn't allocated yet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) * we can't try to create blocks if the create flag is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) ext4_lblk_t hole_start, hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) hole_start = map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) * Put the just-found gap into the cache to speed up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) * subsequent requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) /* Update hole_len to reflect hole size after map->m_lblk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) if (hole_start != map->m_lblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) hole_len -= map->m_lblk - hole_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) map->m_pblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) map->m_len = min_t(unsigned int, map->m_len, hole_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) * Okay, we need to do block allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) newex.ee_block = cpu_to_le32(map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) * If we are doing bigalloc, check to see if the extent returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) * by ext4_find_extent() implies a cluster we can use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) if (cluster_offset && ex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) ar.len = allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) newblock = map->m_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) goto got_allocated_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) /* find neighbour allocated blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) ar.lleft = map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) ar.lright = map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) /* Check if the extent after searching to the right implies a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) * cluster we can use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) if ((sbi->s_cluster_ratio > 1) && err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) ar.len = allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) newblock = map->m_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) goto got_allocated_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) * See if request is beyond maximum number of blocks we can have in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) * a single extent. For an initialized extent this limit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) * EXT_UNWRITTEN_MAX_LEN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) */
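/*
 * For reference: EXT_INIT_MAX_LEN is 1 << 15 = 32768 blocks, while
 * EXT_UNWRITTEN_MAX_LEN is one block less (32767), because the high
 * bit of ee_len is reserved to mark the extent unwritten.
 */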
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) if (map->m_len > EXT_INIT_MAX_LEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) map->m_len = EXT_INIT_MAX_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) map->m_len = EXT_UNWRITTEN_MAX_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) /* Check if we can really insert the (m_lblk):(m_lblk + m_len) extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) newex.ee_len = cpu_to_le16(map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) err = ext4_ext_check_overlap(sbi, inode, &newex, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) allocated = ext4_ext_get_actual_len(&newex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) /* allocate new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) ar.inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) ar.logical = map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * We calculate the offset from the beginning of the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) * for the logical block number, since when we allocate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) * physical cluster, the physical block should start at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) * same offset from the beginning of the cluster. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) * needed so that future calls to get_implied_cluster_alloc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) * work correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) */
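/*
 * Worked example with illustrative numbers (assuming a cluster ratio
 * of 16): for m_lblk = 110 and allocated = 2, offset = 110 & 15 = 14,
 * ar.len = EXT4_NUM_B2C(sbi, 14 + 2) = 1 cluster, and ar.goal and
 * ar.logical are both pulled back by 14 blocks to the cluster start.
 */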
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) ar.goal -= offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) ar.logical -= offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) if (S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) ar.flags = EXT4_MB_HINT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) /* disable in-core preallocation for non-regular files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) ar.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) ar.flags |= EXT4_MB_HINT_NOPREALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) ar.flags |= EXT4_MB_DELALLOC_RESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) ar.flags |= EXT4_MB_USE_RESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) newblock = ext4_mb_new_blocks(handle, &ar, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) if (!newblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) allocated_clusters = ar.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) ar.len = EXT4_C2B(sbi, ar.len) - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) ar.goal, newblock, ar.len, allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) if (ar.len > allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) ar.len = allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) got_allocated_blocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) /* try to insert new extent into found leaf and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) pblk = newblock + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) ext4_ext_store_pblock(&newex, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) newex.ee_len = cpu_to_le16(ar.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) /* Mark unwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) ext4_ext_mark_unwritten(&newex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) map->m_flags |= EXT4_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) if (allocated_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) int fb_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) * Free the data blocks we just allocated. Calling discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) * here directly is not a good idea, but otherwise we'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) * need to call it on every free().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) ext4_discard_preallocations(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) ext4_free_blocks(handle, inode, NULL, newblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) EXT4_C2B(sbi, allocated_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) fb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) * Reduce the reserved cluster count to reflect successful deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) * allocation of delayed allocated clusters or direct allocation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) * clusters discovered to be delayed allocated. Once allocated, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) * cluster is not included in the reserved count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) * When allocating delayed allocated clusters, simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) * reduce the reserved cluster count and claim quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) ext4_da_update_reserve_space(inode, allocated_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) ext4_lblk_t lblk, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) * When allocating non-delayed allocated clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) * (from fallocate, filemap, DIO, or clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) * allocated when delalloc has been disabled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) * ext4_nonda_switch), reduce the reserved cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) * count by the number of allocated clusters that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) * have previously been delayed allocated. Quota
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) * has been claimed by ext4_mb_new_blocks() above,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) * so release the quota reservations made for any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) * previously delayed allocated clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) len = allocated_clusters << sbi->s_cluster_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) n = ext4_es_delayed_clu(inode, lblk, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) if (n > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) ext4_da_update_reserve_space(inode, (int) n, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) * Cache the extent and update transaction to commit on fdatasync only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) * when it is _not_ an unwritten extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) ext4_update_inode_fsync_trans(handle, inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) map->m_pblk = pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) map->m_len = ar.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) allocated = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) ext4_ext_show_leaf(inode, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) trace_ext4_ext_map_blocks_exit(inode, flags, map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) err ? err : allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) return err ? err : allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) int ext4_ext_truncate(handle_t *handle, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) ext4_lblk_t last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) * TODO: optimization is possible here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) * Probably we need not scan at all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) * because page truncation is enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) /* we have to know where to truncate from in the crash case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) EXT4_I(inode)->i_disksize = inode->i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) err = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) last_block = (inode->i_size + sb->s_blocksize - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) >> EXT4_BLOCK_SIZE_BITS(sb);
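/*
 * For example (illustrative numbers): with 4k blocks and
 * i_size = 10000, last_block = (10000 + 4095) >> 12 = 3, so blocks
 * 0..2, which still contain data, are kept, and everything from
 * block 3 onwards is removed below.
 */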
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) err = ext4_es_remove_extent(inode, last_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) EXT_MAX_BLOCKS - last_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) congestion_wait(BLK_RW_ASYNC, HZ/50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) retry_remove_space:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) congestion_wait(BLK_RW_ASYNC, HZ/50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) goto retry_remove_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) ext4_lblk_t len, loff_t new_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) int ret2 = 0, ret3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) int depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) struct ext4_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) unsigned int credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) loff_t epos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) map.m_lblk = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) map.m_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) * Don't normalize the request if it can fit in one extent so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) * that it doesn't get unnecessarily split into multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) * extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (len <= EXT_UNWRITTEN_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) * credits needed to insert one extent into the extent tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) credits = ext4_chunk_trans_blocks(inode, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) while (ret >= 0 && len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) * Recalculate credits when extent tree depth changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (depth != ext_depth(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) credits = ext4_chunk_trans_blocks(inode, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) ret = ext4_map_blocks(handle, inode, &map, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) ext4_debug("inode #%lu: block %u: len %u: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) "ext4_ext_map_blocks returned %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) inode->i_ino, map.m_lblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) map.m_len, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) ret2 = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) map.m_lblk += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) map.m_len = len = len - ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) epos = (loff_t)map.m_lblk << inode->i_blkbits;
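/*
 * e.g. (illustrative, 4k blocks): if 8 blocks of a 20-block request
 * were just mapped at lblk 0, the next iteration asks for 12 blocks
 * at lblk 8, and epos = 8 << 12 = 32768 bytes.
 */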
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) if (new_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) if (epos > new_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) epos = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (ext4_update_inode_size(inode, epos) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) inode->i_mtime = inode->i_ctime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) ret2 = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) ret3 = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) ret2 = ret3 ? ret3 : ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) if (unlikely(ret2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) if (ret == -ENOSPC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) ext4_should_retry_alloc(inode->i_sb, &retries)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) return ret > 0 ? ret2 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) static long ext4_zero_range(struct file *file, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) loff_t len, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) unsigned int max_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) loff_t new_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) int credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) int partial_begin, partial_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) loff_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) ext4_lblk_t lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) unsigned int blkbits = inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) trace_ext4_zero_range(inode, offset, len, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) /* Call ext4_force_commit to flush all data in case of data=journal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) if (ext4_should_journal_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) ret = ext4_force_commit(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) * Round up the offset. This is not fallocate; we need to zero out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) * blocks, so convert the interior, block-aligned part of the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) * to unwritten extents and possibly zero out the unaligned parts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) * of the range manually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) start = round_up(offset, 1 << blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) end = round_down((offset + len), 1 << blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) if (start < offset || end > offset + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) partial_begin = offset & ((1 << blkbits) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) partial_end = (offset + len) & ((1 << blkbits) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) lblk = start >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) max_blocks = (end >> blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) if (max_blocks < lblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) max_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) max_blocks -= lblk;
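/*
 * For example (illustrative numbers, 4k blocks): offset = 1000 and
 * len = 10000 give start = 4096 and end = 8192, so partial_begin =
 * 1000, partial_end = 11000 & 4095 = 2808, lblk = 1 and max_blocks =
 * 1: only block 1 is converted to unwritten, while bytes 1000..4095
 * and 8192..10999 are zeroed via ext4_zero_partial_blocks() below.
 */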
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) * Indirect files do not support unwritten extents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) if (!(mode & FALLOC_FL_KEEP_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) (offset + len > inode->i_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) offset + len > EXT4_I(inode)->i_disksize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) new_size = offset + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) ret = inode_newsize_ok(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) /* Wait for all existing DIO workers; newcomers will block on i_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) /* Preallocate the range including the unaligned edges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) if (partial_begin || partial_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) ret = ext4_alloc_file_blocks(file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) round_down(offset, 1 << blkbits) >> blkbits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) (round_up((offset + len), 1 << blkbits) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) round_down(offset, 1 << blkbits)) >> blkbits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) new_size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) /* Zero range excluding the unaligned edges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) if (max_blocks > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) * Prevent page faults from reinstantiating pages we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) * released from page cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) down_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) ret = ext4_break_layouts(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) up_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) ret = ext4_update_disksize_before_punch(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) up_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) /* Now release the pages and zero the block-aligned part of the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) truncate_pagecache_range(inode, start, end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) up_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) if (!partial_begin && !partial_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) * In the worst case we have to write out two nonadjacent unwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) * blocks and update the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) if (ext4_should_journal_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) credits += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) ext4_std_error(inode->i_sb, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) if (new_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) ext4_update_inode_size(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) ret = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) goto out_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) /* Zero out partial block at the edges of the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) ret = ext4_zero_partial_blocks(handle, inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) if (file->f_flags & O_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) ext4_handle_sync(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) out_handle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) * Preallocate space for a file. This implements ext4's fallocate file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) * operation, which gets called from the sys_fallocate system call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) * For block-mapped files, posix_fallocate should fall back to writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) * zeroes to the required new blocks (the same behavior that is expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) * of file systems which do not support the fallocate() system call).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) loff_t new_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) unsigned int max_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) ext4_lblk_t lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) unsigned int blkbits = inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) * Encrypted inodes can't handle collapse range or insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) * range since we would need to re-encrypt blocks with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) * different IV or XTS tweak (which are based on the logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) * block number).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) if (IS_ENCRYPTED(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) /* Return error if mode is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) FALLOC_FL_INSERT_RANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) ext4_fc_start_update(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) if (mode & FALLOC_FL_PUNCH_HOLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) ret = ext4_punch_hole(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) ret = ext4_convert_inline_data(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) if (mode & FALLOC_FL_COLLAPSE_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) ret = ext4_collapse_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) if (mode & FALLOC_FL_INSERT_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) ret = ext4_insert_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) if (mode & FALLOC_FL_ZERO_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) ret = ext4_zero_range(file, offset, len, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) trace_ext4_fallocate_enter(inode, offset, len, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) lblk = offset >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) /*
* We only support preallocation for extent-based files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) if (!(mode & FALLOC_FL_KEEP_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) (offset + len > inode->i_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) offset + len > EXT4_I(inode)->i_disksize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) new_size = offset + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) ret = inode_newsize_ok(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738)
/* Wait for all existing dio workers; newcomers will block on i_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) EXT4_I(inode)->i_sync_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) ext4_fc_stop_update(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) }
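
/*
* Editor's sketch (illustrative, not part of the kernel build): the mode
* bits dispatched above correspond directly to the flags a userspace
* program passes to fallocate(2). A minimal caller, assuming a writable
* file on ext4 at the hypothetical path "/mnt/ext4/testfile":
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>		// open(), fallocate()
*	#include <linux/falloc.h>	// FALLOC_FL_* flags
*	#include <stdio.h>
*
*	int main(void)
*	{
*		int fd = open("/mnt/ext4/testfile", O_RDWR | O_CREAT, 0644);
*
*		if (fd < 0)
*			return 1;
*		// mode == 0: preallocate 16 MiB of unwritten extents and
*		// extend i_size (no FALLOC_FL_KEEP_SIZE).
*		if (fallocate(fd, 0, 0, 16 << 20))
*			perror("preallocate");
*		// Punch a hole; PUNCH_HOLE must be paired with KEEP_SIZE.
*		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
*			      1 << 20, 1 << 20))
*			perror("punch hole");
*		return 0;
*	}
*
* Reads from the preallocated-but-unwritten region return zeroes until
* the blocks are actually written, which is what the unwritten-extent
* machinery in this file provides.
*/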
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
/*
* This function converts a range of blocks to written extents.
* The caller passes the start offset and the size; all unwritten
* extents within this range will be converted to written extents.
*
* This function is called from the direct IO end io callback
* function, to convert the fallocated extents after IO is completed.
* Returns 0 on success.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) loff_t offset, ssize_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) unsigned int max_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) int ret = 0, ret2 = 0, ret3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) struct ext4_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) unsigned int blkbits = inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) unsigned int credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) map.m_lblk = offset >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) if (!handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) /*
* Credits needed to insert one extent into the extent tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) credits = ext4_chunk_trans_blocks(inode, max_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) while (ret >= 0 && ret < max_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) map.m_lblk += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) map.m_len = (max_blocks -= ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) if (credits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) ret = ext4_map_blocks(handle, inode, &map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) EXT4_GET_BLOCKS_IO_CONVERT_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) ext4_warning(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) "inode #%lu: block %u: len %u: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) "ext4_ext_map_blocks returned %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) inode->i_ino, map.m_lblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) map.m_len, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) ret2 = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) if (credits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) ret3 = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) if (unlikely(ret3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) ret2 = ret3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) if (ret <= 0 || ret2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) return ret > 0 ? ret2 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) }
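
/*
* Editor's sketch: stripped of journalling, the conversion loop above is
* the common "advance by however many blocks the helper handled" pattern.
* process_chunk() below is a hypothetical stand-in for ext4_map_blocks();
* it returns the number of blocks it converted, or a negative error:
*
*	static int convert_range(unsigned int lblk, unsigned int nr_blocks)
*	{
*		int done;
*
*		while (nr_blocks > 0) {
*			done = process_chunk(lblk, nr_blocks);
*			if (done <= 0)
*				return done;	// error, or nothing mapped
*			lblk += done;
*			nr_blocks -= done;
*		}
*		return 0;
*	}
*
* The real loop additionally starts and stops a transaction per chunk
* when no handle was passed in, so that no single transaction has to
* cover the whole range.
*/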
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) int ret = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) struct ext4_io_end_vec *io_end_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) /*
* This is somewhat ugly but the idea is clear: when a transaction is
* reserved, everything goes into it. Otherwise we would rather start
* several smaller transactions, converting each extent separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) handle = ext4_journal_start_reserved(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) EXT4_HT_EXT_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) return PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) ret = ext4_convert_unwritten_extents(handle, io_end->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) io_end_vec->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) io_end_vec->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) err = ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) return ret < 0 ? ret : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) __u64 physical = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) __u64 length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) int blockbits = inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) u16 iomap_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) /* in-inode? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) struct ext4_iloc iloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) int offset; /* offset of xattr in inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) error = ext4_get_inode_loc(inode, &iloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) physical = (__u64)iloc.bh->b_blocknr << blockbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) offset = EXT4_GOOD_OLD_INODE_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) EXT4_I(inode)->i_extra_isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) physical += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) brelse(iloc.bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) iomap_type = IOMAP_INLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) } else if (EXT4_I(inode)->i_file_acl) { /* external block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) length = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) iomap_type = IOMAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) /* no in-inode or external block for xattr, so return -ENOENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) iomap->addr = physical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) iomap->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) iomap->length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) iomap->type = iomap_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) iomap->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) loff_t length, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) struct iomap *iomap, struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) error = ext4_iomap_xattr_fiemap(inode, iomap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) if (error == 0 && (offset >= iomap->length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) static const struct iomap_ops ext4_iomap_xattr_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) .iomap_begin = ext4_iomap_xattr_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) u64 maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) maxbytes = inode->i_sb->s_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) if (*len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) if (start > maxbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) * Shrink request scope to what the fs can actually handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) if (*len > maxbytes || (maxbytes - *len) < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) *len = maxbytes - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) error = ext4_ext_precache(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) * For bitmap files the maximum size limit could be smaller than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) * s_maxbytes, so check len here manually instead of just relying on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) * generic check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) error = ext4_fiemap_check_ranges(inode, start, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) return iomap_fiemap(inode, fieinfo, start, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) &ext4_iomap_xattr_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) }
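
/*
* Editor's sketch (illustrative, not part of the kernel build):
* ext4_fiemap() above ultimately services the FS_IOC_FIEMAP ioctl. A
* minimal userspace caller that prints every extent of a file; the path
* and the 32-extent cap are hypothetical:
*
*	#include <fcntl.h>
*	#include <linux/fiemap.h>
*	#include <linux/fs.h>		// FS_IOC_FIEMAP
*	#include <stdio.h>
*	#include <stdlib.h>
*	#include <sys/ioctl.h>
*
*	int main(void)
*	{
*		unsigned int i, n = 32;
*		struct fiemap *fm = calloc(1, sizeof(*fm) +
*					   n * sizeof(struct fiemap_extent));
*		int fd = open("/mnt/ext4/testfile", O_RDONLY);
*
*		if (fd < 0 || !fm)
*			return 1;
*		fm->fm_start = 0;
*		fm->fm_length = FIEMAP_MAX_OFFSET;	// map the whole file
*		fm->fm_extent_count = n;
*		if (ioctl(fd, FS_IOC_FIEMAP, fm))
*			return 1;
*		for (i = 0; i < fm->fm_mapped_extents; i++)
*			printf("logical %llu physical %llu length %llu\n",
*			       (unsigned long long)fm->fm_extents[i].fe_logical,
*			       (unsigned long long)fm->fm_extents[i].fe_physical,
*			       (unsigned long long)fm->fm_extents[i].fe_length);
*		return 0;
*	}
*
* Passing FIEMAP_FLAG_XATTR in fm_flags reports the xattr location via
* the ext4_iomap_xattr_ops path above instead of the file data.
*/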
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) __u64 start, __u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) ext4_lblk_t start_blk, len_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) __u64 last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) if (ext4_has_inline_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) int has_inline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) down_read(&EXT4_I(inode)->xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) has_inline = ext4_has_inline_data(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) up_read(&EXT4_I(inode)->xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) if (has_inline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) error = ext4_ext_precache(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) error = fiemap_prep(inode, fieinfo, start, &len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) error = ext4_fiemap_check_ranges(inode, start, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) start_blk = start >> inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) if (last_blk >= EXT_MAX_BLOCKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) last_blk = EXT_MAX_BLOCKS-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) * Walk the extent tree gathering extent information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) * and pushing extents back to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) }
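
/*
* Editor's sketch: the byte-range to block-range conversion above, as
* standalone arithmetic. MAX_BLOCKS is a stand-in for EXT_MAX_BLOCKS;
* the clamp keeps last_blk representable as a 32-bit logical block:
*
*	#include <stdint.h>
*
*	#define MAX_BLOCKS 0xffffffffULL	// stand-in for EXT_MAX_BLOCKS
*
*	static uint32_t range_to_blocks(uint64_t start, uint64_t len,
*					unsigned int blkbits,
*					uint32_t *start_blk)
*	{
*		uint64_t last_blk = (start + len - 1) >> blkbits;
*
*		*start_blk = (uint32_t)(start >> blkbits);
*		if (last_blk >= MAX_BLOCKS)
*			last_blk = MAX_BLOCKS - 1;
*		return (uint32_t)last_blk - *start_blk + 1;	// len_blks
*	}
*
* With a 4 KiB block size (blkbits == 12), start == 6000 and len == 10000
* give start_blk == 1 and len_blks == 3, covering bytes 4096..16383.
*/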
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) * ext4_ext_shift_path_extents:
* Shift the extents of a path structure lying between path[depth].p_ext
* and EXT_LAST_EXTENT(path[depth].p_hdr) by @shift blocks. @SHIFT tells
* whether it is a right-shift or a left-shift operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) struct inode *inode, handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) enum SHIFT_DIRECTION SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) int depth, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) struct ext4_extent *ex_start, *ex_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) bool update = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) int credits, restart_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) while (depth >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) if (depth == path->p_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) ex_start = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) if (!ex_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) /* leaf + sb + inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) credits = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) /* extent tree + sb + inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) credits = depth + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) restart_credits = ext4_writepage_trans_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) err = ext4_datasem_ensure_credits(handle, inode, credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) restart_credits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) while (ex_start <= ex_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) if (SHIFT == SHIFT_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) le32_add_cpu(&ex_start->ee_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) -shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) /* Try to merge to the left. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) if ((ex_start >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) EXT_FIRST_EXTENT(path[depth].p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) ext4_ext_try_to_merge_right(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) path, ex_start - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) ex_last--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) ex_start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) le32_add_cpu(&ex_last->ee_block, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) ext4_ext_try_to_merge_right(inode, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) ex_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) ex_last--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) if (--depth < 0 || !update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) /* Update index too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) err = ext4_ext_get_access(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) if (SHIFT == SHIFT_LEFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) le32_add_cpu(&path[depth].p_idx->ei_block, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) err = ext4_ext_dirty(handle, inode, path + depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089)
/* We are done if the current index is not the starting index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) }
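
/*
* Editor's sketch: le32_add_cpu() used above adjusts an on-disk
* little-endian field in place. A userspace analogue built on glibc's
* <endian.h>, with disk_block standing in for ee_block/ei_block:
*
*	#include <endian.h>
*	#include <stdint.h>
*
*	static void le32_add(uint32_t *disk_block, int32_t delta)
*	{
*		// Decode from little-endian, add in CPU order, re-encode.
*		*disk_block = htole32(le32toh(*disk_block) + delta);
*	}
*
* Shifting left passes a negative delta (-shift), shifting right a
* positive one, exactly as in the loop above.
*/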
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) * ext4_ext_shift_extents:
* All the extents which lie in the range from @start to the last allocated
* block of the @inode are shifted either left or right (depending
* on @SHIFT) by @shift blocks.
* On success, 0 is returned; a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) ext4_lblk_t start, ext4_lblk_t shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) enum SHIFT_DIRECTION SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) struct ext4_ext_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) int ret = 0, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) struct ext4_extent *extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) ext4_lblk_t stop, *iterator, ex_start, ex_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) ext4_lblk_t tmp = EXT_MAX_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) /* Let path point to the last extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) extent = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) if (!extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) stop = le32_to_cpu(extent->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) * For left shifts, make sure the hole on the left is big enough to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) * accommodate the shift. For right shifts, make sure the last extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) * won't be shifted beyond EXT_MAX_BLOCKS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) if (SHIFT == SHIFT_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) path = ext4_find_extent(inode, start - 1, &path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) extent = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) if (extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) ex_start = le32_to_cpu(extent->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) ex_end = le32_to_cpu(extent->ee_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) ext4_ext_get_actual_len(extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) ex_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) ex_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) if ((start == ex_start && shift > ex_start) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) (shift > start - ex_end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) if (shift > EXT_MAX_BLOCKS -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) (stop + ext4_ext_get_actual_len(extent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) * In case of left shift, iterator points to start and it is increased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) * till we reach stop. In case of right shift, iterator points to stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) * and it is decreased till we reach start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) if (SHIFT == SHIFT_LEFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) iterator = &start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) iterator = &stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) if (tmp != EXT_MAX_BLOCKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) *iterator = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) /*
* It's safe to start updating extents. Start and stop are unsigned, so
* in case of a right shift, if an extent with block 0 is reached, the
* iterator becomes NULL to indicate the end of the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) while (iterator && start <= stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) path = ext4_find_extent(inode, *iterator, &path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) depth = path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) extent = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) if (!extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) (unsigned long) *iterator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) if (SHIFT == SHIFT_LEFT && *iterator >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) le32_to_cpu(extent->ee_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) /* Hole, move to the next extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) path[depth].p_ext++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) *iterator = ext4_ext_next_allocated_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) tmp = *iterator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) if (SHIFT == SHIFT_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) extent = EXT_LAST_EXTENT(path[depth].p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) *iterator = le32_to_cpu(extent->ee_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) ext4_ext_get_actual_len(extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) if (le32_to_cpu(extent->ee_block) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) *iterator = le32_to_cpu(extent->ee_block) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) /* Beginning is reached, end of the loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) iterator = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) /* Update path extent in case we need to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) while (le32_to_cpu(extent->ee_block) < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) extent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) path[depth].p_ext = extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) ret = ext4_ext_shift_path_extents(path, shift, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) handle, SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) /* iterator can be NULL which means we should break */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) * ext4_collapse_range:
* This implements the collapse-range functionality of fallocate for ext4.
* Returns 0 on success, a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) ext4_lblk_t punch_start, punch_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) unsigned int credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) loff_t new_size, ioffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) * We need to test this early because xfstests assumes that a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) * collapse range of (0, 1) will return EOPNOTSUPP if the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) * system does not support collapse range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) /* Collapse range works only on fs cluster size aligned regions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) trace_ext4_collapse_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) /* Call ext4_force_commit to flush all data in case of data=journal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) if (ext4_should_journal_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) ret = ext4_force_commit(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) /*
* The collapse range must not overlap or reach EOF; in that case the
* operation would effectively be a truncate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) if (offset + len >= inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) /* Currently just for extent based files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) /* Wait for existing dio to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) * Prevent page faults from reinstantiating pages we have released from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) * page cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) down_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) ret = ext4_break_layouts(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) * Need to round down offset to be aligned with page size boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) * for page size > block size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) ioffset = round_down(offset, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) /*
* Write out the tail of the last page before the removed range, since
* it will be removed from the page cache below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) /*
* Write out the data that will be shifted, to preserve it when
* discarding the page cache below. We are also protected from pages
* becoming dirty by i_mmap_sem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) truncate_pagecache(inode, ioffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) credits = ext4_writepage_trans_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) ext4_discard_preallocations(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) ret = ext4_es_remove_extent(inode, punch_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) EXT_MAX_BLOCKS - punch_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) ext4_discard_preallocations(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) ret = ext4_ext_shift_extents(inode, handle, punch_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) punch_stop - punch_start, SHIFT_LEFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) new_size = inode->i_size - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) i_size_write(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) EXT4_I(inode)->i_disksize = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) if (IS_SYNC(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) ext4_handle_sync(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) ret = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) out_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) ext4_fc_stop_ineligible(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) out_mmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) up_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) }
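
/*
* Editor's sketch (illustrative, not part of the kernel build): driving
* the collapse path above from userspace. Offset and length must be
* cluster-aligned; without bigalloc the cluster size equals the block
* size, which FIGETBSZ reports. The path is hypothetical and the file is
* assumed to be larger than two blocks:
*
*	#define _GNU_SOURCE
*	#include <fcntl.h>
*	#include <linux/falloc.h>
*	#include <linux/fs.h>		// FIGETBSZ
*	#include <stdio.h>
*	#include <sys/ioctl.h>
*
*	int main(void)
*	{
*		int fd = open("/mnt/ext4/testfile", O_RDWR);
*		int bsz;
*
*		if (fd < 0 || ioctl(fd, FIGETBSZ, &bsz))
*			return 1;
*		// Drop one aligned block from the middle: everything after
*		// it shifts left and i_size shrinks by bsz bytes.
*		if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, bsz, bsz))
*			perror("collapse");
*		return 0;
*	}
*
* Collapsing up to or past EOF fails with EINVAL, per the check at the
* top of ext4_collapse_range().
*/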
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) * ext4_insert_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
* The data blocks starting at @offset and running to EOF are shifted
* right by @len to create a hole in the @inode. The inode size is
* increased by @len bytes.
* Returns 0 on success, a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) struct ext4_ext_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) struct ext4_extent *extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) unsigned int credits, ee_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) int ret = 0, depth, split_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) loff_t ioffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) * We need to test this early because xfstests assumes that an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) * insert range of (0, 1) will return EOPNOTSUPP if the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) * system does not support insert range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) /* Insert range works only on fs cluster size aligned regions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) trace_ext4_insert_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) /* Call ext4_force_commit to flush all data in case of data=journal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) if (ext4_should_journal_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) ret = ext4_force_commit(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) /* Currently just for extent based files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) /* Check whether the maximum file size would be exceeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) if (len > inode->i_sb->s_maxbytes - inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) ret = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) /* Offset must be less than i_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) if (offset >= inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) /* Wait for existing dio to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) * Prevent page faults from reinstantiating pages we have released from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) * page cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) down_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) ret = ext4_break_layouts(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) * Need to round down to align start offset to page size boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) * for page size > block size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) ioffset = round_down(offset, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) /* Write out all dirty pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) truncate_pagecache(inode, ioffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) credits = ext4_writepage_trans_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) goto out_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476)
/* Expand file to avoid data loss if there is an error while shifting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) inode->i_size += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) EXT4_I(inode)->i_disksize += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) ret = ext4_mark_inode_dirty(handle, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) ext4_discard_preallocations(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		ret = PTR_ERR(path);
		goto out_stop;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) extent = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) if (extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) ee_start_lblk = le32_to_cpu(extent->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) ee_len = ext4_ext_get_actual_len(extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499)
		/*
		 * If offset_lblk is not the starting block of the extent,
		 * split the extent at offset_lblk
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) if ((offset_lblk > ee_start_lblk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) (offset_lblk < (ee_start_lblk + ee_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) if (ext4_ext_is_unwritten(extent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) split_flag = EXT4_EXT_MARK_UNWRIT1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) EXT4_EXT_MARK_UNWRIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) ret = ext4_split_extent_at(handle, inode, &path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) offset_lblk, split_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) EXT4_EX_NOCACHE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) EXT4_GET_BLOCKS_PRE_IO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) EXT4_GET_BLOCKS_METADATA_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526)
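	/*
	 * Drop the extent status cache from offset_lblk to the end of the
	 * file; the shift below invalidates all of those entries.
	 */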
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) ret = ext4_es_remove_extent(inode, offset_lblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) EXT_MAX_BLOCKS - offset_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533)
	/*
	 * If offset_lblk lies in a hole at the start of the file, use
	 * ee_start_lblk to shift extents
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) ret = ext4_ext_shift_extents(inode, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) len_lblk, SHIFT_RIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) if (IS_SYNC(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) ext4_handle_sync(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) ext4_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) out_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) ext4_fc_stop_ineligible(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) out_mmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) up_write(&EXT4_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) * ext4_swap_extents() - Swap extents between two inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) * @handle: handle for this transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) * @inode1: First inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) * @inode2: Second inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) * @lblk1: Start block for first inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) * @lblk2: Start block for second inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) * @count: Number of blocks to swap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) * @unwritten: Mark second inode's extents as unwritten after swap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) * @erp: Pointer to save error value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) *
 * This helper routine does exactly what its name promises: it swaps the
 * extents. Everything else, such as page-cache locking consistency, bh
 * mapping consistency, and copying the extents' data, must be handled by
 * the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) * Locking:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) * i_mutex is held for both inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) * i_data_sem is locked for write for both inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) * All pages from requested range are locked for both inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) ext4_swap_extents(handle_t *handle, struct inode *inode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) ext4_lblk_t count, int unwritten, int *erp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) struct ext4_ext_path *path1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) struct ext4_ext_path *path2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) int replaced_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) BUG_ON(!inode_is_locked(inode1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) BUG_ON(!inode_is_locked(inode2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591)
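	/* Invalidate the extent status cache for both ranges being swapped */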
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) *erp = ext4_es_remove_extent(inode1, lblk1, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) *erp = ext4_es_remove_extent(inode2, lblk2, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598)
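	/*
	 * Swap at most one extent pair per iteration: look up the extents
	 * covering lblk1 and lblk2, skip over holes, split the extents so
	 * that both ranges start and end on extent boundaries, and only
	 * then exchange the equal-length mappings.
	 */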
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) struct ext4_extent *ex1, *ex2, tmp_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) ext4_lblk_t e1_blk, e2_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) int e1_len, e2_len, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) int split = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) if (IS_ERR(path1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) *erp = PTR_ERR(path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) path1 = NULL;
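		/*
		 * "finish" zeroes the remaining count so that the loop
		 * exits once the cleanup code at "repeat" has run.
		 */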
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) if (IS_ERR(path2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) *erp = PTR_ERR(path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) path2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) ex1 = path1[path1->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) if (unlikely(!ex2 || !ex1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) e1_blk = le32_to_cpu(ex1->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) e2_blk = le32_to_cpu(ex2->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) e1_len = ext4_ext_get_actual_len(ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) e2_len = ext4_ext_get_actual_len(ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) /* Hole handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) if (!in_range(lblk1, e1_blk, e1_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) !in_range(lblk2, e2_blk, e2_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) ext4_lblk_t next1, next2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634)
			/* If the hole is after the extent, go to the next extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) next1 = ext4_ext_next_allocated_block(path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) next2 = ext4_ext_next_allocated_block(path2);
			/* If the hole is before the extent, shift to that extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) if (e1_blk > lblk1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) next1 = e1_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) if (e2_blk > lblk2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) next2 = e2_blk;
			/* Do we have something to swap? */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) len = next1 - lblk1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) if (len < next2 - lblk2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) len = next2 - lblk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) if (len > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) lblk1 += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) lblk2 += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) /* Prepare left boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) if (e1_blk < lblk1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) split = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) *erp = ext4_force_split_extent_at(handle, inode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) &path1, lblk1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) if (e2_blk < lblk2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) split = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) *erp = ext4_force_split_extent_at(handle, inode2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) &path2, lblk2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) }
		/* ext4_split_extent_at() may result in a leaf extent split,
		 * so the paths must be revalidated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) if (split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) /* Prepare right boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) if (len > e1_blk + e1_len - lblk1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) len = e1_blk + e1_len - lblk1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) if (len > e2_blk + e2_len - lblk2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) len = e2_blk + e2_len - lblk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) if (len != e1_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) split = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) *erp = ext4_force_split_extent_at(handle, inode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) &path1, lblk1 + len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) if (len != e2_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) split = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) *erp = ext4_force_split_extent_at(handle, inode2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) &path2, lblk2 + len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) if (*erp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) }
		/* ext4_split_extent_at() may result in a leaf extent split,
		 * so the paths must be revalidated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) if (split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703)
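		/*
		 * No split was needed above, so both extents start at
		 * lblk1/lblk2 and are exactly len blocks long.
		 */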
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) BUG_ON(e2_len != e1_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711)
		/* Both extents are fully inside boundaries. Swap them now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) tmp_ex = *ex1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) ex1->ee_len = cpu_to_le16(e2_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) ex2->ee_len = cpu_to_le16(e1_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) ext4_ext_mark_unwritten(ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) if (ext4_ext_is_unwritten(&tmp_ex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) ext4_ext_mark_unwritten(ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) ext4_ext_try_to_merge(handle, inode2, path2, ex2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) ext4_ext_try_to_merge(handle, inode1, path1, ex1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) *erp = ext4_ext_dirty(handle, inode2, path2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) path2->p_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) *erp = ext4_ext_dirty(handle, inode1, path1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) path1->p_depth);
		/*
		 * Looks scary, eh? The second inode already points to the
		 * new blocks, and it was successfully dirtied. But luckily
		 * an error here can only be a journal error, and then the
		 * whole transaction will be aborted anyway.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) if (unlikely(*erp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) lblk1 += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) lblk2 += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) replaced_count += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) ext4_ext_drop_refs(path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) kfree(path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) ext4_ext_drop_refs(path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) kfree(path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) path1 = path2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) return replaced_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) * ext4_clu_mapped - determine whether any block in a logical cluster has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) * been mapped to a physical cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) * @inode - file containing the logical cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) * @lclu - logical cluster of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) * Returns 1 if any block in the logical cluster is mapped, signifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) * that a physical cluster has been allocated for it. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) * returns 0. Can also return negative error codes. Derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) * ext4_ext_map_blocks().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) struct ext4_ext_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) int depth, mapped = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) struct ext4_extent *extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) ext4_lblk_t first_lblk, first_lclu, last_lclu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) /* search for the extent closest to the first block in the cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) if (IS_ERR(path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) err = PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) depth = ext_depth(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) * A consistent leaf must not be empty. This situation is possible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) * though, _during_ tree modification, and it's why an assert can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) * be put in ext4_find_extent().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) (unsigned long) EXT4_C2B(sbi, lclu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) depth, path[depth].p_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) extent = path[depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) /* can't be mapped if the extent tree is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) if (extent == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) first_lblk = le32_to_cpu(extent->ee_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) first_lclu = EXT4_B2C(sbi, first_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806)
	/*
	 * Three possible outcomes at this point: the extent found spans
	 * the target cluster, lies to the left of it, or lies to the
	 * right of it. The first two cases are handled here. The last
	 * case indicates the target cluster is not mapped.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) if (lclu >= first_lclu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) last_lclu = EXT4_B2C(sbi, first_lblk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) ext4_ext_get_actual_len(extent) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) if (lclu <= last_lclu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) } else {
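			/*
			 * The extent found ends before the target cluster;
			 * the next allocated extent may still begin in it.
			 */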
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) first_lblk = ext4_ext_next_allocated_block(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) first_lclu = EXT4_B2C(sbi, first_lblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) if (lclu == first_lclu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) return err ? err : mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832)
/*
 * Updates the physical block address and unwritten status of the extent
 * starting at logical block @start and of length @len. If such an extent
 * doesn't exist, this function splits the extent tree appropriately to
 * create one. This function is called on the fast commit replay path.
 * Returns 0 on success and a negative error code on failure.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) int len, int unwritten, ext4_fsblk_t pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) struct ext4_ext_path *path = NULL, *ppath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) path = ext4_find_extent(inode, start, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) if (le32_to_cpu(ex->ee_block) != start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) ext4_ext_get_actual_len(ex) != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) /* We need to split this extent to match our extent first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) ppath = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) path = ext4_find_extent(inode, start, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) if (IS_ERR(path))
			return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) ppath = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) WARN_ON(le32_to_cpu(ex->ee_block) != start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) if (ext4_ext_get_actual_len(ex) != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) ret = ext4_force_split_extent_at(NULL, inode, &ppath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) start + len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) path = ext4_find_extent(inode, start, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) }
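	/* ex now starts at @start and is @len blocks long; update it in place */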
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) if (unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) ext4_ext_mark_unwritten(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) ext4_ext_mark_initialized(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) ext4_ext_store_pblock(ex, pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) ext4_mark_inode_dirty(NULL, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) /* Try to shrink the extent tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) struct ext4_ext_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) ext4_lblk_t old_cur, cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907)
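	/* Walk every extent up to @end and try to merge it with its neighbours */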
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) while (cur < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) path = ext4_find_extent(inode, cur, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) ext4_mark_inode_dirty(NULL, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) old_cur = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) if (cur <= old_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) cur = old_cur + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) ext4_ext_try_to_merge(NULL, inode, path, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) down_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) up_write(&EXT4_I(inode)->i_data_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) ext4_mark_inode_dirty(NULL, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) /* Check if *cur is a hole and if it is, skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) struct ext4_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) map.m_lblk = *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) ret = ext4_map_blocks(NULL, inode, &map, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) return 0;
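	/* ret == 0: [*cur, *cur + map.m_len) is a hole, so step over it */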
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) *cur = *cur + map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950)
/* Count the number of blocks used by this inode and update i_blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) int ext4_ext_replay_set_iblocks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) struct ext4_ext_path *path = NULL, *path2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) ext4_lblk_t cur = 0, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) int numblks = 0, i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) ext4_fsblk_t cmp1, cmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) struct ext4_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960)
	/* Determine the size of the file first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) /* Count the number of data blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) while (cur < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) map.m_lblk = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) map.m_len = end - cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) ret = ext4_map_blocks(NULL, inode, &map, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) numblks += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) cur = cur + map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988)
	/*
	 * Count the number of extent tree blocks. We do it by looking up
	 * two successive extents and determining the difference between
	 * their paths. When the paths of two successive extents differ,
	 * we compare the blocks in the path at each level and increment
	 * iblocks by the total number of differences found.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) ret = skip_hole(inode, &cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) path = ext4_find_extent(inode, cur, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) goto out;
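	/*
	 * The path to the first extent contributes one tree block per
	 * level below the root; the root itself lives in the inode.
	 */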
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) numblks += path->p_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) while (cur < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) path = ext4_find_extent(inode, cur, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) ext4_ext_get_actual_len(ex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) ret = skip_hole(inode, &cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) path2 = ext4_find_extent(inode, cur, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) if (IS_ERR(path2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) ex = path2[path2->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) cmp1 = cmp2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) if (i <= path->p_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) cmp1 = path[i].p_bh ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) path[i].p_bh->b_blocknr : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) if (i <= path2->p_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) cmp2 = path2[i].p_bh ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) path2[i].p_bh->b_blocknr : 0;
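			/*
			 * Differing blocks at a level mean path2 has entered
			 * a tree block that hasn't been counted yet.
			 */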
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) if (cmp1 != cmp2 && cmp2 != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) numblks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) ext4_ext_drop_refs(path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) kfree(path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) ext4_mark_inode_dirty(NULL, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053)
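/*
 * On the fast commit replay path: walk all mapped ranges of the inode and
 * mark their blocks (the data blocks as well as the extent tree blocks on
 * each lookup path) as unused in the block bitmaps, recording each region
 * for the fast commit machinery.
 */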
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) int ext4_ext_clear_bb(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) struct ext4_ext_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) struct ext4_extent *ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) ext4_lblk_t cur = 0, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) int j, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) struct ext4_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061)
	/* Determine the size of the file first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) EXT4_EX_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) if (IS_ERR(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) return PTR_ERR(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) ex = path[path->p_depth].p_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) if (!ex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) while (cur < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) map.m_lblk = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) map.m_len = end - cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) ret = ext4_map_blocks(NULL, inode, &map, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) if (!IS_ERR_OR_NULL(path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) for (j = 0; j < path->p_depth; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) ext4_mb_mark_bb(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) path[j].p_block, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) ext4_fc_record_regions(inode->i_sb, inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) 0, path[j].p_block, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) ext4_ext_drop_refs(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) ext4_fc_record_regions(inode->i_sb, inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) map.m_lblk, map.m_pblk, map.m_len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) cur = cur + map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) }