// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@dcs.ed.ac.uk), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */

#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}
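
/*
 * For illustration (editorial sketch, assuming a 1KiB block size): a fast
 * symlink keeps its target bytes directly in i_data, so i_blocks stays 0
 * unless an xattr block was allocated; that single block accounts for
 * s_blocksize >> 9 = 2 sectors of 512 bytes, which is exactly the
 * ea_blocks value subtracted above.
 */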

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext2_evict_inode(struct inode *inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		sb_start_intwrite(inode->i_sb);
		/* set dtime */
		EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* truncate to 0 */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

/**
 * ext2_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext2 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes. This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)th node in the nth one. If @i_block is out of
 * range (negative or too large), a warning is printed and zero is returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
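
/*
 * Worked example (editorial sketch, assuming a 1KiB block size, so
 * ptrs = 256 and ptrs_bits = 8): logical block 5 is a direct block and
 * maps to { 5 } (depth 1); block 12 is the first indirect one and maps
 * to { EXT2_IND_BLOCK, 0 } (depth 2); block 268 = 12 + 256 exhausts the
 * indirect range and maps to { EXT2_DIND_BLOCK, 0, 0 } (depth 3).
 */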

static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}

/**
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon return, chain[i].key contains
 * the number of the (i+1)-th block in the chain (as it is stored in
 * memory, i.e. little-endian 32-bit), chain[i].p contains the address of
 * that number (it points into struct inode for i==0 and into the
 * bh->b_data for i>0) and chain[i].bh points to the buffer_head of the
 * i-th indirect block for i>0 and NULL for i==0. In other words, it holds
 * the block numbers of the chain, the addresses they were taken from (and
 * where we can verify that the chain did not change) and the buffer_heads
 * hosting these numbers.
 *
 * Function stops when it stumbles upon a zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it notices that the chain had been changed while it was reading
 *	(ditto, *@err == -EAGAIN)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
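
/*
 * Example of the chain built above (editorial sketch): a depth-2 lookup of
 * logical block 12 sets chain[0].p to &EXT2_I(inode)->i_data[EXT2_IND_BLOCK]
 * with chain[0].bh == NULL, and chain[1].p into the indirect block's
 * bh->b_data at offset 0 with chain[1].bh holding that buffer. If that
 * slot is still zero, &chain[1] is returned with *err == 0, telling the
 * caller where allocation has to resume.
 */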

/**
 * ext2_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different
 * inode in the same block group. The PID is used here so that functionally
 * related files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */

static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
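
/*
 * Colour arithmetic above, for illustration (editorial sketch): with 8192
 * blocks per group, a caller whose PID is 1234 gets colour
 * (1234 % 16) * (8192 / 16) = 2 * 512 = 1024, i.e. its allocations start
 * 1024 blocks into the group, leaving the other fifteen slots to
 * unrelated processes.
 */
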
/**
 * ext2_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns preferred place for a block (the goal).
 */

static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
{
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}

/**
 * ext2_blks_to_allocate - look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the number of direct blocks to allocate.
 */
static int
ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		      int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
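
/*
 * Example (editorial sketch): a request for blks = 8 data blocks with
 * blocks_to_boundary = 3 maps at most 4 blocks in one call - the boundary
 * block plus the three before it - since cross-boundary allocation is
 * deliberately not attempted above.
 */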

/**
 * ext2_alloc_blocks - allocate multiple blocks needed for a branch
 * @inode: owner
 * @goal: preferred place for allocation
 * @indirect_blks: the number of blocks that need to be allocated for the
 *	indirect blocks
 * @blks: the number of blocks that need to be allocated for the direct
 *	blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block
 * @err: here we store the error value
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch. That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
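
/*
 * Example for ext2_alloc_blocks() (editorial sketch): with indirect_blks = 1
 * and blks = 4, target is 5; if ext2_new_blocks() hands back a contiguous
 * run starting at block 1000, new_blocks[0] = 1000 becomes the indirect
 * block, new_blocks[1] = 1001 the first direct block, and 4 - the number
 * of direct blocks obtained - is returned.
 */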

/**
 * ext2_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: depth of the chain (number of indirect blocks to allocate)
 * @blks: on entry the number of direct blocks we want; on return the
 *	number actually allocated
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates the blocks for the branch, zeroes out the new
 * indirect blocks, links them into a chain and (if we are synchronous)
 * writes them to disk. In other words, it prepares a branch that can be
 * spliced onto the inode. It stores the information about that chain in
 * the branch[], in the same format as ext2_get_branch() would do. We are
 * calling it after we had read the existing part of the chain and partial
 * points to the last triple of that (one with zero ->key). Upon exit we
 * have the same picture as after a successful ext2_get_block(), except
 * that in one place the chain is disconnected - *branch->p is still zero
 * (we did not set the last link), but branch->key contains the number that
 * should be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext2_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */

static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_write_sync()
		 * and b_inode_buffers. Directories, however,
		 * still need an explicit sync.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	/* i == indirect_blks here: free the run of direct blocks as well */
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}

/**
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed
 * in inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */
static void ext2_splice_branch(struct inode *inode,
			       long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * Update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to help find the proper goal block for the
	 * next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* Did we splice it onto an indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
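
/*
 * Walk-through (editorial sketch, assuming a 1KiB block size): a write to
 * logical block 13 of a file that so far has only direct blocks yields
 * depth = 2 and offsets = { EXT2_IND_BLOCK, 1 }. If the indirect block
 * already exists but its slot 1 is empty, ext2_get_branch() returns
 * partial = chain + 1, so indirect_blks = (chain + 2) - partial - 1 = 0
 * and only direct blocks get allocated; if the indirect block itself is
 * missing, partial = chain and indirect_blks = 1, so one metadata block
 * is allocated before the data.
 */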
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grabbed the mutex
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch), re-grab the chain to
	 * see if the requested block has been allocated or not.
	 *
	 * Since we already blocked truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/*
	 * Okay, we need to do block allocation. Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ???? Block out ext2_truncate while we alter the tree
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * We must unmap blocks before zeroing so that writeback cannot
		 * overwrite zeros with stale data from the block device page
		 * cache.
		 */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * The block must be initialised before we put it in the tree
		 * so that it cannot be found by another thread before it is
		 * initialised.
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) int ext2_get_block(struct inode *inode, sector_t iblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct buffer_head *bh_result, int create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) bool new = false, boundary = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) u32 bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) create);
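^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * ext2_get_blocks() returns the number of blocks mapped (> 0), 0 for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * a hole, or a negative errno; only a positive count is translated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * into a mapped buffer_head below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */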
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) map_bh(bh_result, inode->i_sb, bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) bh_result->b_size = (ret << inode->i_blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (boundary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) set_buffer_boundary(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) #ifdef CONFIG_FS_DAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unsigned flags, struct iomap *iomap, struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) unsigned int blkbits = inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) unsigned long first_block = offset >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bool new = false, boundary = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) u32 bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ret = ext2_get_blocks(inode, first_block, max_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) &bno, &new, &boundary, flags & IOMAP_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) iomap->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) iomap->bdev = inode->i_sb->s_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) iomap->offset = (u64)first_block << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) iomap->dax_dev = sbi->s_daxdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) iomap->type = IOMAP_HOLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) iomap->addr = IOMAP_NULL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) iomap->length = 1 << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) iomap->type = IOMAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) iomap->addr = (u64)bno << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) iomap->length = (u64)ret << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) iomap->flags |= IOMAP_F_MERGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) iomap->flags |= IOMAP_F_NEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ssize_t written, unsigned flags, struct iomap *iomap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (iomap->type == IOMAP_MAPPED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) written < length &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) (flags & IOMAP_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ext2_write_failed(inode->i_mapping, offset + length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
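^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * These ops plug into the generic iomap machinery; for instance, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * DAX I/O path in fs/ext2/file.c ends up doing, roughly:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * ret = dax_iomap_rw(iocb, iter, &ext2_iomap_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */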
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) const struct iomap_ops ext2_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) .iomap_begin = ext2_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) .iomap_end = ext2_iomap_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) const struct iomap_ops ext2_iomap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #endif /* CONFIG_FS_DAX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return generic_block_fiemap(inode, fieinfo, start, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static int ext2_writepage(struct page *page, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return block_write_full_page(page, ext2_get_block, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int ext2_readpage(struct file *file, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return mpage_readpage(page, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static void ext2_readahead(struct readahead_control *rac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) mpage_readahead(rac, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ext2_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ret = block_write_begin(mapping, pos, len, flags, pagep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ext2_write_failed(mapping, pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int ext2_write_end(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ret < len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ext2_write_failed(mapping, pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ext2_write_failed(mapping, pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static int ext2_nobh_writepage(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return nobh_writepage(page, ext2_get_block, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
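^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * Backs the FIBMAP ioctl and other bmap users: generic_block_bmap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * translates a logical file block into its physical block number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * returning 0 for a hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) */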
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return generic_block_bmap(mapping, block, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct address_space *mapping = file->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) loff_t offset = iocb->ki_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
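^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * On a failed or short O_DIRECT write, blocks may already have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * instantiated beyond i_size; ext2_write_failed() trims the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * tree back so the on-disk state matches i_size again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */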
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (ret < 0 && iov_iter_rw(iter) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ext2_write_failed(mapping, offset + count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return mpage_writepages(mapping, wbc, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) const struct address_space_operations ext2_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .readpage = ext2_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .readahead = ext2_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .writepage = ext2_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) .write_begin = ext2_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) .write_end = ext2_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) .bmap = ext2_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .direct_IO = ext2_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .writepages = ext2_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .is_partially_uptodate = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) const struct address_space_operations ext2_nobh_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .readpage = ext2_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .readahead = ext2_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .writepage = ext2_nobh_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) .write_begin = ext2_nobh_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .write_end = nobh_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .bmap = ext2_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .direct_IO = ext2_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .writepages = ext2_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static const struct address_space_operations ext2_dax_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .writepages = ext2_dax_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .direct_IO = noop_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .set_page_dirty = noop_set_page_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) .invalidatepage = noop_invalidatepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Probably this should be a library function: search for the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * non-zero word, or memcmp() with the zero page, whichever is better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * for a particular architecture. Linus?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static inline int all_zeroes(__le32 *p, __le32 *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) while (p < q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (*p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * ext2_find_shared - find the indirect blocks for partial truncation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * @inode: inode in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * @depth: depth of the affected branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * @offsets: offsets of pointers in that branch (see ext2_block_to_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * @chain: place to store the pointers to partial indirect blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * @top: place to store the (detached) top of the branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * This is a helper function used by ext2_truncate().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * When we do truncate() we may have to clean the ends of several indirect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * blocks but leave the blocks themselves alive. A block is partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * truncated if some data below the new i_size is referred from it (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * it is on the path to the first completely truncated data block, indeed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * We have to free the top of that path along with everything to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * of the path. Since no allocation past the truncation point is possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * until ext2_truncate() finishes, we may safely do the latter, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * top of the branch may require special attention - pageout below the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * truncation point might try to populate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * We atomically detach the top of branch from the tree, store the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * number of its root in *@top, pointers to buffer_heads of partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * truncated blocks - in @chain[].bh and pointers to their last elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * that should not be removed - in @chain[].p. Return value is the pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * to last filled element of @chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * The caller is left to do the actual freeing of the subtrees:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * a) free the subtree starting from *@top
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * b) free the subtrees whose roots are stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * c) free the subtrees growing from the inode past the @chain[0].p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * (no partially truncated stuff there).
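^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * Illustrative example: when the cut point falls in the middle of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * doubly indirect subtree, the doubly indirect block itself survives,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * @chain[].p records the last pointer within it that must be kept, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * the caller frees everything to the right of it, plus the detached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * subtree at *@top, if one was split off.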
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static Indirect *ext2_find_shared(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int offsets[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) Indirect chain[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) __le32 *top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) Indirect *partial, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int k, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *top = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) for (k = depth; k > 1 && !offsets[k-1]; k--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) partial = ext2_get_branch(inode, k, offsets, chain, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) partial = chain + k-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * If the branch acquired continuation since we've looked at it -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * fine, it should all survive and (new) top doesn't belong to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) write_lock(&EXT2_I(inode)->i_meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!partial->key && *partial->p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) write_unlock(&EXT2_I(inode)->i_meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto no_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * OK, we've found the last block that must survive. The rest of our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * branch should be detached before unlocking. However, if that rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * of branch is all ours and does not grow immediately from the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * it's easier to cheat and just decrement partial->p.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (p == chain + k - 1 && p > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) p->p--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) *top = *p->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) *p->p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) write_unlock(&EXT2_I(inode)->i_meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) while (partial > p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) brelse(partial->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) no_top:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * ext2_free_data - free a list of data blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * @inode: inode we are dealing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * @p: array of block numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * @q: points immediately past the end of array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * We are freeing all blocks referred from that array (numbers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * stored as little-endian 32-bit) and updating @inode->i_blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * appropriately.
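^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Contiguous runs are coalesced before being handed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * ext2_free_blocks(): e.g. (illustrative) the array {100, 101, 102, 200}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * produces ext2_free_blocks(inode, 100, 3) followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * ext2_free_blocks(inode, 200, 1).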
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) unsigned long block_to_free = 0, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) unsigned long nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) for (; p < q; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) nr = le32_to_cpu(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) *p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* accumulate blocks to free if they're contiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) goto free_this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) else if (block_to_free == nr - count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ext2_free_blocks(inode, block_to_free, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) free_this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) block_to_free = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ext2_free_blocks(inode, block_to_free, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * ext2_free_branches - free an array of branches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * @inode: inode we are dealing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * @p: array of block numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @q: pointer immediately past the end of array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * @depth: depth of the branches to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * We are freeing all blocks referred from these branches (numbers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * stored as little-endian 32-bit) and updating @inode->i_blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * appropriately.
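^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * @depth counts the levels below the pointers in @p: with @depth == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * the blocks at *p are indirect blocks whose contents are data block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * numbers, and @depth == 0 degenerates into ext2_free_data().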
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) unsigned long nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (depth--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) for (; p < q; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) nr = le32_to_cpu(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (!nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) *p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) bh = sb_bread(inode->i_sb, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * A read failure? Report error and clear slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * (should be rare).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (!bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ext2_error(inode->i_sb, "ext2_free_branches",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) "Read failure, inode=%lu, block=%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) inode->i_ino, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ext2_free_branches(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) (__le32*)bh->b_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) (__le32*)bh->b_data + addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) bforget(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ext2_free_blocks(inode, nr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ext2_free_data(inode, p, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* dax_sem must be held when calling this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) __le32 *i_data = EXT2_I(inode)->i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct ext2_inode_info *ei = EXT2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int offsets[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) Indirect chain[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) Indirect *partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) __le32 nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) long iblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) unsigned blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) iblock = (offset + blocksize - 1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
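^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * iblock is the first wholly-discarded block: e.g. (illustrative) an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * offset of 5000 with 1KiB blocks gives iblock = 5, so block 4, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * still holds bytes 4096..4999, survives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) */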
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) #ifdef CONFIG_FS_DAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) WARN_ON(!rwsem_is_locked(&ei->dax_sem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) n = ext2_block_to_path(inode, iblock, offsets, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (n == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * From here we block out all ext2_get_block() callers who want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * modify the block allocation tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) mutex_lock(&ei->truncate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (n == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ext2_free_data(inode, i_data+offsets[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) i_data + EXT2_NDIR_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto do_indirects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) partial = ext2_find_shared(inode, n, offsets, chain, &nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /* Kill the top of shared branch (already detached) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (partial == chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) mark_buffer_dirty_inode(partial->bh, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* Clear the ends of indirect blocks on the shared branch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) while (partial > chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ext2_free_branches(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) partial->p + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) (__le32*)partial->bh->b_data+addr_per_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) (chain+n-1) - partial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mark_buffer_dirty_inode(partial->bh, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) brelse(partial->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) partial--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) do_indirects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Kill the remaining (whole) subtrees */
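^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * offsets[0] selects the starting point: a cut inside the direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * blocks (the default case) must also drop the indirect, doubly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * indirect and triply indirect trees; a cut inside the indirect tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * starts at EXT2_IND_BLOCK and drops only the deeper trees, and so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * on - hence the deliberate fallthroughs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */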
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) switch (offsets[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) nr = i_data[EXT2_IND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) i_data[EXT2_IND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ext2_free_branches(inode, &nr, &nr+1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) case EXT2_IND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) nr = i_data[EXT2_DIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) i_data[EXT2_DIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ext2_free_branches(inode, &nr, &nr+1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case EXT2_DIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) nr = i_data[EXT2_TIND_BLOCK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) i_data[EXT2_TIND_BLOCK] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ext2_free_branches(inode, &nr, &nr+1, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) case EXT2_TIND_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ext2_discard_reservation(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) mutex_unlock(&ei->truncate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) S_ISLNK(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (ext2_inode_is_fast_symlink(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dax_sem_down_write(EXT2_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) __ext2_truncate_blocks(inode, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dax_sem_up_write(EXT2_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int ext2_setsize(struct inode *inode, loff_t newsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) S_ISLNK(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (ext2_inode_is_fast_symlink(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (IS_DAX(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) error = iomap_zero_range(inode, newsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) PAGE_ALIGN(newsize) - newsize, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) &ext2_iomap_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) } else if (test_opt(inode->i_sb, NOBH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) error = nobh_truncate_page(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) newsize, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) error = block_truncate_page(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) newsize, ext2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dax_sem_down_write(EXT2_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) truncate_setsize(inode, newsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) __ext2_truncate_blocks(inode, newsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dax_sem_up_write(EXT2_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (inode_needs_sync(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sync_mapping_buffers(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) sync_inode_metadata(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct buffer_head **p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) unsigned long block_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) unsigned long block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct ext2_group_desc *gdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) goto Einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) gdp = ext2_get_group_desc(sb, block_group, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!gdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) goto Egdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * Figure out the offset within the block group inode table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) block = le32_to_cpu(gdp->bg_inode_table) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) (offset >> EXT2_BLOCK_SIZE_BITS(sb));
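^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Illustrative example: with 128-byte inodes and a 1KiB block size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * the 9th inode of a group sits at byte offset 8 * 128 = 1024, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * in the second block of the group's inode table, at offset 0 within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * that block (the in-block offset is recovered below by masking with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * the block size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */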
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) bh = sb_bread(sb, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) goto Eio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) *p = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) offset &= (EXT2_BLOCK_SIZE(sb) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return (struct ext2_inode *) (bh->b_data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) Einval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) (unsigned long) ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) Eio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ext2_error(sb, "ext2_get_inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) "unable to read inode block - inode=%lu, block=%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) (unsigned long) ino, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) Egdp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) void ext2_set_inode_flags(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) unsigned int flags = EXT2_I(inode)->i_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) S_DIRSYNC | S_DAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (flags & EXT2_SYNC_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) inode->i_flags |= S_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (flags & EXT2_APPEND_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) inode->i_flags |= S_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (flags & EXT2_IMMUTABLE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) inode->i_flags |= S_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (flags & EXT2_NOATIME_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) inode->i_flags |= S_NOATIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (flags & EXT2_DIRSYNC_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) inode->i_flags |= S_DIRSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) inode->i_flags |= S_DAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) void ext2_set_file_ops(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) inode->i_op = &ext2_file_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) inode->i_fop = &ext2_file_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (IS_DAX(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) inode->i_mapping->a_ops = &ext2_dax_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) else if (test_opt(inode->i_sb, NOBH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) inode->i_mapping->a_ops = &ext2_nobh_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) inode->i_mapping->a_ops = &ext2_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct ext2_inode_info *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct ext2_inode *raw_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) long ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) uid_t i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) gid_t i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) inode = iget_locked(sb, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!(inode->i_state & I_NEW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ei = EXT2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ei->i_block_alloc_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (IS_ERR(raw_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = PTR_ERR(raw_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) inode->i_mode = le16_to_cpu(raw_inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (!test_opt(inode->i_sb, NO_UID32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) i_uid_write(inode, i_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) i_gid_write(inode, i_gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) inode->i_size = le32_to_cpu(raw_inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* We now have enough fields to check if the inode was active or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * This is needed because nfsd might try to access dead inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * The test is the same one that e2fsck uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * NeilBrown 1999oct15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* this inode is deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ret = -ESTALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ei->i_flags = le32_to_cpu(raw_inode->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ext2_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ei->i_frag_no = raw_inode->i_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ei->i_frag_size = raw_inode->i_fsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ei->i_dir_acl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (ei->i_file_acl &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ei->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (i_size_read(inode) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) goto bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ei->i_dtime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) inode->i_generation = le32_to_cpu(raw_inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ei->i_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) ei->i_dir_start_lookup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * NOTE! The in-memory inode i_data array is in little-endian order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * even on big-endian machines: we do NOT byteswap the block numbers!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) for (n = 0; n < EXT2_N_BLOCKS; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ei->i_data[n] = raw_inode->i_block[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (S_ISREG(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ext2_set_file_ops(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) } else if (S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) inode->i_op = &ext2_dir_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) inode->i_fop = &ext2_dir_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (test_opt(inode->i_sb, NOBH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) inode->i_mapping->a_ops = &ext2_nobh_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) inode->i_mapping->a_ops = &ext2_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) } else if (S_ISLNK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (ext2_inode_is_fast_symlink(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) inode->i_link = (char *)ei->i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) inode->i_op = &ext2_fast_symlink_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) nd_terminate_link(ei->i_data, inode->i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) sizeof(ei->i_data) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) inode->i_op = &ext2_symlink_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) inode_nohighmem(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (test_opt(inode->i_sb, NOBH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) inode->i_mapping->a_ops = &ext2_nobh_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) inode->i_mapping->a_ops = &ext2_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) inode->i_op = &ext2_special_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (raw_inode->i_block[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) init_special_inode(inode, inode->i_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) init_special_inode(inode, inode->i_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) bad_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) iget_failed(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static int __ext2_write_inode(struct inode *inode, int do_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct ext2_inode_info *ei = EXT2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ino_t ino = inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) uid_t uid = i_uid_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) gid_t gid = i_gid_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (IS_ERR(raw_inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /* For fields not tracked in the in-memory inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * initialise them to zero for new inodes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (ei->i_state & EXT2_STATE_NEW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) raw_inode->i_mode = cpu_to_le16(inode->i_mode);
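	/*
	 * The on-disk inode splits 32-bit uid/gid into 16-bit low and
	 * high halves; with the NO_UID32 mount option only the legacy
	 * low halves are written.
	 */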
	if (!test_opt(sb, NO_UID32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise,
		 * deleted inodes could be re-used by an old kernel with
		 * the upper 16 bits of the uid/gid left intact.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!ei->i_dtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) raw_inode->i_uid_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) raw_inode->i_gid_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) raw_inode->i_uid_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) raw_inode->i_gid_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) raw_inode->i_size = cpu_to_le32(inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) raw_inode->i_flags = cpu_to_le32(ei->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) raw_inode->i_frag = ei->i_frag_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) raw_inode->i_fsize = ei->i_frag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
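	/*
	 * On disk, i_dir_acl and i_size_high share the same slot (the
	 * latter is an alias in ext2.h): for regular files it carries
	 * the upper 32 bits of i_size instead of a directory ACL.
	 */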
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (inode->i_size > 0x7fffffffULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) EXT2_SB(sb)->s_es->s_rev_level ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/*
				 * If this is the first large file created,
				 * set the RO_COMPAT_LARGE_FILE feature flag
				 * in the superblock.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) spin_lock(&EXT2_SB(sb)->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ext2_update_dynamic_rev(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) EXT2_SET_RO_COMPAT_FEATURE(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) spin_unlock(&EXT2_SB(sb)->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) raw_inode->i_generation = cpu_to_le32(inode->i_generation);
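	/*
	 * Mirror of the decode logic earlier in this file: an old-style
	 * 16-bit device number is stored in i_block[0], a new-style one
	 * in i_block[1], and the unused slot is zeroed so readers can
	 * tell the two encodings apart.
	 */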
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (old_valid_dev(inode->i_rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) raw_inode->i_block[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) cpu_to_le32(old_encode_dev(inode->i_rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) raw_inode->i_block[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) raw_inode->i_block[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) raw_inode->i_block[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) cpu_to_le32(new_encode_dev(inode->i_rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) raw_inode->i_block[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
	} else {
		for (n = 0; n < EXT2_N_BLOCKS; n++)
			raw_inode->i_block[n] = ei->i_data[n];
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) mark_buffer_dirty(bh);
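	/*
	 * For a synchronous write, wait for the buffer to reach disk; a
	 * buffer that saw I/O (buffer_req) yet is no longer uptodate
	 * after sync_dirty_buffer() indicates a write error.
	 */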
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (do_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) sync_dirty_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk(KERN_ERR "IO error syncing ext2 inode [%s:%08lx]\n",
			       sb->s_id, (unsigned long) ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ei->i_state &= ~EXT2_STATE_NEW;
	brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
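/*
 * Writeback entry point, presumably wired up as the ->write_inode
 * method in ext2's super_operations (in super.c), roughly:
 *
 *	.write_inode = ext2_write_inode,
 *
 * Data-integrity writeback (WB_SYNC_ALL) becomes a synchronous update.
 */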
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
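/*
 * ->getattr method (hooked into ext2's inode_operations elsewhere in
 * the driver): translate the user-visible EXT2_*_FL inode flags into
 * their STATX_ATTR_* equivalents, then fill the rest generically.
 */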
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int ext2_getattr(const struct path *path, struct kstat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) u32 request_mask, unsigned int query_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct inode *inode = d_inode(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct ext2_inode_info *ei = EXT2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (flags & EXT2_APPEND_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) stat->attributes |= STATX_ATTR_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (flags & EXT2_COMPR_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) stat->attributes |= STATX_ATTR_COMPRESSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (flags & EXT2_IMMUTABLE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) stat->attributes |= STATX_ATTR_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (flags & EXT2_NODUMP_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) stat->attributes |= STATX_ATTR_NODUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) stat->attributes_mask |= (STATX_ATTR_APPEND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) STATX_ATTR_COMPRESSED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) STATX_ATTR_ENCRYPTED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) STATX_ATTR_IMMUTABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) STATX_ATTR_NODUMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) generic_fillattr(inode, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
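/*
 * ->setattr method: validate the request, keep quota accounting in
 * step with ownership changes, route size changes through
 * ext2_setsize(), then copy the remaining attributes into the inode.
 */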
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) error = setattr_prepare(dentry, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
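	/*
	 * Size and ownership changes affect quota accounting, so make
	 * sure the inode's dquots are initialised before any usage is
	 * transferred or charged below.
	 */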
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (is_quota_modification(inode, iattr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) error = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) error = dquot_transfer(inode, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
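	/*
	 * ext2_setsize() adjusts i_size, the page cache and the block
	 * mappings for the new size.
	 */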
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) error = ext2_setsize(inode, iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) setattr_copy(inode, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (iattr->ia_valid & ATTR_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) error = posix_acl_chmod(inode, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }