// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/indirect.c
 *
 * from
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes. This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)-th node in the n-th one. If @i_block is out
 * of range (negative or too large), a warning is printed and zero is
 * returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
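
/*
 * Worked example (illustrative, not from the original source): assume a
 * 4KB block size, so EXT4_ADDR_PER_BLOCK == 1024 (ptrs_bits == 10) and
 * EXT4_NDIR_BLOCKS == 12. Then:
 *
 *	i_block = 5    -> offsets = { 5 }                       (depth 1)
 *	i_block = 20   -> offsets = { EXT4_IND_BLOCK, 8 }       (depth 2)
 *	i_block = 5000 -> offsets = { EXT4_DIND_BLOCK, 3, 892 } (depth 3)
 *	                  (5000 - 12 - 1024 = 3964; 3964 >> 10 == 3;
 *	                   3964 & 1023 == 892)
 *
 * For i_block = 5000, *boundary = 1024 - 1 - 892 = 131, i.e. 131 more
 * pointers fit in the same leaf indirect block after this one.
 */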

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
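
/*
 * Usage sketch (illustrative; this mirrors what ext4_ind_map_blocks()
 * below actually does):
 *
 *	ext4_lblk_t offsets[4];
 *	Indirect chain[4], *partial;
 *	int err, depth;
 *
 *	depth = ext4_block_to_path(inode, lblk, offsets, NULL);
 *	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
 *	if (!partial)
 *		pblk = le32_to_cpu(chain[depth - 1].key);   (fully mapped)
 *	else if (!err)
 *		...                      (hole starts at 'partial')
 *
 * Buffer heads in chain[1..] must be released with brelse() afterwards,
 * as the cleanup loop at the end of ext4_ind_map_blocks() does.
 */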

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: if the [t,d]indirect block(s) have not been allocated
	 * yet, then it's clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
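
/*
 * Example (illustrative): with blocks_to_boundary == 5 there are six slots
 * left in the leaf indirect block (the current one plus five more). If
 * indirect blocks are still missing (k > 0), a request for blks == 8 data
 * blocks is clamped to blocks_to_boundary + 1 == 6, while blks == 4 stays 4.
 * If the leaf already exists (k == 0), the scan above additionally stops at
 * the first slot that is already in use.
 */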

/**
 * ext4_alloc_branch() - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Simplify error cleanup... */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Free data blocks */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. Buffer for new_blocks[i] is at branch[i+1].bh
		 * (buffer at branch[0].bh is indirect block / inode already
		 * existing before ext4_alloc_branch() was called). Also
		 * because blocks are freshly allocated, we don't need to
		 * revoke them which is why we don't set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
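
/*
 * Sketch of the result (illustrative): with indirect_blks == 2 and
 * ar->len == 3, new_blocks[0..1] are fresh indirect blocks and
 * new_blocks[2] is the first of three contiguous data blocks:
 *
 *	branch[0].key = new_blocks[0]	(not yet stored at *branch[0].p -
 *					 that is ext4_splice_branch()'s job)
 *	branch[1].bh  = buffer of new_blocks[0]; its slot offsets[1]
 *			points to new_blocks[1]
 *	branch[2].bh  = buffer of new_blocks[1]; slots offsets[2]..+2
 *			point to the three consecutive data blocks
 */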

/**
 * ext4_splice_branch() - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating filesystem
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the subtree under 'partial'.
		 * At each level we count the number of complete empty
		 * subtrees beyond the current offset, and then descend into
		 * the subtree that is only partially beyond the current
		 * offset.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}

/*
 * Calculate number of indirect blocks touched by mapping @nrblocks logically
 * contiguous blocks
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
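
/*
 * Example (illustrative, assuming 4KB blocks so EXT4_ADDR_PER_BLOCK == 1024):
 * for nrblocks == 2048 this returns DIV_ROUND_UP(2048, 1024) + 4 == 6,
 * i.e. up to 3 indirect blocks (N/1024 + 1), 2 dindirect blocks and
 * 1 tindirect block may be touched by the mapping.
 */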

static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}
/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If
 * extending fails, we restart the transaction.
 */
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, bh);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}
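
/*
 * Note (descriptive, not from the original source): the last argument to
 * ext4_journal_ensure_credits_fn() is an expression that is evaluated only
 * when the handle actually has to be restarted, which is why
 * ext4_ind_trunc_restart_fn() - flushing dirty state and dropping
 * i_data_sem - runs only on that path; 'dropped' then tells us to re-take
 * i_data_sem and re-acquire write access to @bh afterwards.
 */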

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred to
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of the branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * We atomically detach the top of the branch from the tree, store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * the block number of its root in *@top, store pointers to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * buffer_heads of partially truncated blocks in @chain[].bh, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * store pointers to their last elements that should not be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * in @chain[].p. The return value is a pointer to the last filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * element of @chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * The work left to the caller is the actual freeing of subtrees:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * a) free the subtree starting from *@top
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * b) free the subtrees whose roots are stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * c) free the subtrees growing from the inode past @chain[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * (no partially truncated stuff there). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static Indirect *ext4_find_shared(struct inode *inode, int depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ext4_lblk_t offsets[4], Indirect chain[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) __le32 *top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) Indirect *partial, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int k, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *top = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* Make k index the deepest non-null offset + 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) for (k = depth; k > 1 && !offsets[k-1]; k--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) partial = ext4_get_branch(inode, k, offsets, chain, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Writer: pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) partial = chain + k-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * If the branch has acquired a continuation since we last looked at it -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * fine, it should all survive and the (new) top doesn't belong to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!partial->key && *partial->p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* Writer: end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) goto no_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * OK, we've found the last block that must survive. The rest of our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * branch should be detached before unlocking. However, if that rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * of the branch is all ours and does not grow immediately from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * inode, it's easier to cheat and just decrement partial->p.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (p == chain + k - 1 && p > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) p->p--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) *top = *p->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Nope, don't do this in ext4. Must leave the tree intact */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) *p->p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* Writer: end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) while (partial > p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) brelse(partial->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) no_top:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
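
/*
 * Worked example (hypothetical numbers): with 4KiB blocks each
 * indirect block holds 1024 pointers, so last_block = 12 + 1024 + 5
 * lies in the double-indirect tree and ext4_block_to_path() yields
 * offsets = { EXT4_DIND_BLOCK, 0, 5 } with depth == 3.
 * ext4_find_shared() walks that path; roughly, the deepest indirect
 * block that still holds live pointers below the cut ends up in
 * @chain[] with chain[].p aimed at the last pointer to keep, and
 * everything to the right of the path is left for the caller to free.
 */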
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Zero a number of block pointers in either an inode or an indirect block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * If we restart the transaction we must again get write access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * indirect block for further modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * We release `count' blocks on disk, but (last - first) may be greater
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * than `count' because there can be holes in there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * Return 0 on success, 1 on invalid block range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * and < 0 on fatal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct buffer_head *bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ext4_fsblk_t block_to_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unsigned long count, __le32 *first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) __le32 *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) __le32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) int flags = EXT4_FREE_BLOCKS_VALIDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) else if (ext4_should_journal_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) flags |= EXT4_FREE_BLOCKS_FORGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!ext4_inode_block_valid(inode, block_to_free, count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) EXT4_ERROR_INODE(inode, "attempt to clear invalid "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "blocks %llu len %lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) (unsigned long long) block_to_free, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ext4_free_data_revoke_credits(inode, count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) for (p = first; p < last; p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) *p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ext4_std_error(inode->i_sb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
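
/*
 * Example of the flag selection above: for a regular file mounted
 * data=ordered neither branch fires, so flags stays
 * EXT4_FREE_BLOCKS_VALIDATED and no forget/revoke work is queued for
 * the freed data blocks; for a directory (always journaled metadata)
 * the FORGET and METADATA flags are added.
 */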
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * ext4_free_data - free a list of data blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * @handle: handle for this transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @inode: inode we are dealing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @this_bh: indirect buffer_head which contains *@first and *@last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @first: array of block numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * @last: points immediately past the end of array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * We are freeing all blocks referred from that array (numbers are stored as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * little-endian 32-bit) and updating @inode->i_blocks appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * We accumulate contiguous runs of blocks to free. Conveniently, if these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * blocks are contiguous then releasing them at one time will only affect one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * actually use a lot of journal space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * @this_bh will be %NULL if @first and @last point into the inode's direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * block pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static void ext4_free_data(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct buffer_head *this_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) __le32 *first, __le32 *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned long count = 0; /* Number of blocks in the run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) block_to_free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ext4_fsblk_t nr; /* Current block # */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) __le32 *p; /* Pointer into inode/ind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) for current block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (this_bh) { /* For indirect block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) BUFFER_TRACE(this_bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) err = ext4_journal_get_write_access(handle, this_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* Important: if we can't update the indirect pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * to the blocks, we can't free them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) for (p = first; p < last; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) nr = le32_to_cpu(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /* accumulate blocks to free if they're contiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) block_to_free = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) block_to_free_p = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) } else if (nr == block_to_free + count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) err = ext4_clear_blocks(handle, inode, this_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) block_to_free, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) block_to_free_p, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) block_to_free = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) block_to_free_p = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!err && count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) count, block_to_free_p, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (this_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * The buffer head should have an attached journal head at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * point. However, if the data is corrupted and an indirect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * block pointed to itself, it would have been detached when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * the block was cleared. Check for this instead of OOPSing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ext4_handle_dirty_metadata(handle, inode, this_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) "circular indirect block detected at "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) "block %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) (unsigned long long) this_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
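
/*
 * Worked example (hypothetical array contents): for
 * { 100, 101, 0, 0, 102, 200 } the loop above emits two runs.  The
 * zero slots are holes, and 102 == 100 + 2 still extends the first
 * run, so ext4_clear_blocks() is called with (block_to_free = 100,
 * count = 3) and (last - first) == 5, then with (200, 1) by the
 * final flush after the loop - exactly the count vs. (last - first)
 * difference that ext4_clear_blocks() documents.
 */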
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * ext4_free_branches - free an array of branches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * @handle: JBD handle for this transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * @inode: inode we are dealing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * @parent_bh: the buffer_head which contains *@first and *@last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * @first: array of block numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * @last: pointer immediately past the end of array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * @depth: depth of the branches to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * We are freeing all blocks referred from these branches (numbers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * stored as little-endian 32-bit) and updating @inode->i_blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static void ext4_free_branches(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct buffer_head *parent_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __le32 *first, __le32 *last, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ext4_fsblk_t nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) __le32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ext4_handle_is_aborted(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (depth--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) p = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) while (--p >= first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) nr = le32_to_cpu(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) continue; /* A hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!ext4_inode_block_valid(inode, nr, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) EXT4_ERROR_INODE(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) "invalid indirect mapped "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) "block %lu (level %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) (unsigned long) nr, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* Go read the buffer for the next level down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bh = ext4_sb_bread(inode->i_sb, nr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * A read failure? Report error and clear slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * (should be rare).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (IS_ERR(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) "Read failure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* This zaps the entire block. Bottom up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) BUFFER_TRACE(bh, "free child branches");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ext4_free_branches(handle, inode, bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) (__le32 *) bh->b_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) (__le32 *) bh->b_data + addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Everything below this pointer has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * released. Now let this top-of-subtree go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * We want the freeing of this indirect block to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * atomic in the journal with the updating of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * bitmap block which owns it. So make some room in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * the journal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * We zero the parent pointer *after* freeing its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * pointee in the bitmaps, so if extend_transaction()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * for some reason fails to put the bitmap changes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * the release into the same transaction, recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * will merely complain about releasing a free block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * rather than leaking blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (ext4_handle_is_aborted(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (ext4_ind_truncate_ensure_credits(handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ext4_free_metadata_revoke_credits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) inode->i_sb, 1)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * The forget flag here is critical because if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * we are journaling (and not doing data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * journaling), we have to make sure a revoke
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * record is written to prevent the journal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * replay from overwriting the (former)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * indirect block if it gets reallocated as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * data block. This must happen in the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * transaction where the data blocks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * actually freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ext4_free_blocks(handle, inode, NULL, nr, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXT4_FREE_BLOCKS_METADATA|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) EXT4_FREE_BLOCKS_FORGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (parent_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * The block which we have just freed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * pointed to by an indirect block: journal it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) BUFFER_TRACE(parent_bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!ext4_journal_get_write_access(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) parent_bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) BUFFER_TRACE(parent_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) "call ext4_handle_dirty_metadata");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ext4_handle_dirty_metadata(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) parent_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* We have reached the bottom of the tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) BUFFER_TRACE(parent_bh, "free data blocks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ext4_free_data(handle, inode, parent_bh, first, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
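
/*
 * Sketch of the recursion: called on a double-indirect root with
 * depth == 2, the function reads each child block and recurses with
 * depth == 1 (note the post-decrement above); that level recurses
 * once more with depth == 0, which lands in the else branch and
 * frees the data blocks via ext4_free_data().
 */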
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) void ext4_ind_truncate(handle_t *handle, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct ext4_inode_info *ei = EXT4_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) __le32 *i_data = ei->i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ext4_lblk_t offsets[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) Indirect chain[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) Indirect *partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) __le32 nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ext4_lblk_t last_block, max_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) unsigned blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) last_block = (inode->i_size + blocksize-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (last_block != max_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) n = ext4_block_to_path(inode, last_block, offsets, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (n == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * The orphan list entry will now protect us from any crash which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * occurs before the truncate completes, so it is now safe to propagate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * the new, shorter inode size (held for now in i_size) into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * on-disk inode. We do this via i_disksize, which is the value which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * ext4 *really* writes onto the disk inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ei->i_disksize = inode->i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (last_block == max_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * It is unnecessary to free any data blocks if last_block is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * equal to the indirect block limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else if (n == 1) { /* direct blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ext4_free_data(handle, inode, NULL, i_data+offsets[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) i_data + EXT4_NDIR_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) goto do_indirects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) partial = ext4_find_shared(inode, n, offsets, chain, &nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* Kill the top of shared branch (not detached) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (partial == chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* Shared branch grows from the inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ext4_free_branches(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) &nr, &nr+1, (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) *partial->p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * We mark the inode dirty prior to restart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * and prior to stop. No need for it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* Shared branch grows from an indirect block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) BUFFER_TRACE(partial->bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) partial->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) partial->p+1, (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Clear the ends of indirect blocks on the shared branch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) while (partial > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) (__le32*)partial->bh->b_data+addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) BUFFER_TRACE(partial->bh, "call brelse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) brelse(partial->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) do_indirects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* Kill the remaining (whole) subtrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) switch (offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) nr = i_data[EXT4_IND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) i_data[EXT4_IND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) case EXT4_IND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) nr = i_data[EXT4_DIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) i_data[EXT4_DIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) case EXT4_DIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) nr = i_data[EXT4_TIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) i_data[EXT4_TIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) case EXT4_TIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
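
/*
 * Worked example (hypothetical sizes): with 4KiB blocks, truncating
 * to i_size = 100 MiB gives last_block = 25600, which lies in the
 * double-indirect tree, so offsets[0] == EXT4_DIND_BLOCK.  The
 * shared branch is trimmed as above and the do_indirects switch
 * enters at case EXT4_DIND_BLOCK, freeing only the whole
 * triple-indirect subtree.
 */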
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * ext4_ind_remove_space - remove space from the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * @handle: JBD handle for this transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * @inode: inode we are dealing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * @start: First block to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * @end: One block after the last block to remove (exclusive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * Free the blocks in the defined range (end is exclusive endpoint of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * range). This is used by ext4_punch_hole().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ext4_lblk_t start, ext4_lblk_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct ext4_inode_info *ei = EXT4_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) __le32 *i_data = ei->i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) ext4_lblk_t offsets[4], offsets2[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) Indirect chain[4], chain2[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) Indirect *partial, *partial2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) Indirect *p = NULL, *p2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ext4_lblk_t max_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) __le32 nr = 0, nr2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int n = 0, n2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) unsigned blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (end >= max_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) end = max_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if ((start >= end) || (start > max_block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) n = ext4_block_to_path(inode, start, offsets, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) n2 = ext4_block_to_path(inode, end, offsets2, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) BUG_ON(n > n2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
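	/*
	 * Example (hypothetical range): punching blocks 5..500 of a
	 * 4KiB-block inode gives n == 1 (start is a direct block) and
	 * n2 == 2 (end is inside the indirect tree), so we take the
	 * n2 > n path below.
	 */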
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if ((n == 1) && (n == n2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* We're punching only within direct block range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ext4_free_data(handle, inode, NULL, i_data + offsets[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) i_data + offsets2[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else if (n2 > n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * Start and end are on different levels, so we're going to free the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * partial block at the start, and the partial block at the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * the range. If there are some levels in between then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * do_indirects label will take care of that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (n == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * Start is at the direct block level, free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * everything to the end of the level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ext4_free_data(handle, inode, NULL, i_data + offsets[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) i_data + EXT4_NDIR_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto end_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (partial == chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* Shared branch grows from the inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) ext4_free_branches(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) &nr, &nr+1, (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) *partial->p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* Shared branch grows from an indirect block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) BUFFER_TRACE(partial->bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) partial->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) partial->p+1, (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * Clear the ends of indirect blocks on the shared branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * at the start of the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) while (partial > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) partial->p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) (__le32 *)partial->bh->b_data+addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) end_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (nr2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (partial2 == chain2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * Remember, end is exclusive, so here we're at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * start of the next level, which we're not going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * free. Everything was covered by the start of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto do_indirects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * ext4_find_shared() returns an Indirect structure which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * points to the last element that should not be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * by truncate. But this is the end of the range in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * punch_hole, so we need to point to the next element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) partial2->p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * Clear the ends of indirect blocks on the shared branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * at the end of the range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) while (partial2 > chain2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ext4_free_branches(handle, inode, partial2->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) (__le32 *)partial2->bh->b_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) partial2->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) (chain2+n2-1) - partial2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) partial2--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) goto do_indirects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* Punch happened within the same level (n == n2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* Free top, but only if partial2 isn't its subtree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int level = min(partial - chain, partial2 - chain2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) int subtree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) for (i = 0; i <= level; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (offsets[i] != offsets2[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) subtree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (!subtree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (partial == chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* Shared branch grows from the inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ext4_free_branches(handle, inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) &nr, &nr+1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *partial->p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* Shared branch grows from an indirect block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) BUFFER_TRACE(partial->bh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) partial->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) partial->p+1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!nr2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * ext4_find_shared() returns an Indirect structure which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * points to the last element that should not be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * by truncate. But this is the end of the range in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * punch_hole, so we need to point to the next element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) partial2->p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) while (partial > chain || partial2 > chain2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int depth = (chain+n-1) - partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) int depth2 = (chain2+n2-1) - partial2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (partial > chain && partial2 > chain2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) partial->bh->b_blocknr == partial2->bh->b_blocknr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * We've converged on the same block. Clear the range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * then we're done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) partial->p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) partial2->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * The start and end partial branches may not be at the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * level even though the punch happened within one level. So, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * give them a chance to arrive at the same level, then walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * them in step with each other until we converge on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
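		/*
		 * Example: if the start branch sits one level deeper
		 * (closer to the leaves) than the end branch, its
		 * subtree depth is the smaller value, so only the
		 * first branch below runs and partial moves up one
		 * level; once the two sides are level, both
		 * conditions hold and they trim in lockstep until the
		 * convergence test at the top of the loop fires.
		 */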
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (partial > chain && depth <= depth2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ext4_free_branches(handle, inode, partial->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) partial->p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) (__le32 *)partial->bh->b_data+addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (partial2 > chain2 && depth2 <= depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) ext4_free_branches(handle, inode, partial2->bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) (__le32 *)partial2->bh->b_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) partial2->p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) (chain2+n2-1) - partial2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) partial2--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) while (p && p > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) BUFFER_TRACE(p->bh, "call brelse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) brelse(p->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) p--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) while (p2 && p2 > chain2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) BUFFER_TRACE(p2->bh, "call brelse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) brelse(p2->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) p2--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) do_indirects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* Kill the remaining (whole) subtrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) switch (offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (++n >= n2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) nr = i_data[EXT4_IND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) i_data[EXT4_IND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) case EXT4_IND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (++n >= n2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) nr = i_data[EXT4_DIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) i_data[EXT4_DIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) case EXT4_DIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (++n >= n2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) nr = i_data[EXT4_TIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) i_data[EXT4_TIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case EXT4_TIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
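
/*
 * Usage sketch (simplified; assumes the caller has set up the handle
 * and serialized against writers, as ext4_punch_hole() does before it
 * gets here): removing the mapped blocks for the logical range
 * [start, end) of an indirect-mapped inode is just
 *
 *	ret = ext4_ind_remove_space(handle, inode, start, end);
 */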