// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

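/*
 * Split the position of a block within the file into a chain of array
 * indices through the inode's direct and indirect pointer trees: fills
 * offsets[] with up to four entries and returns the depth of the chain
 * (0 if i_block lies beyond the largest mappable offset).
 *
 * For example, with UFS_NDADDR == 12 direct pointers and an
 * illustrative s_apb == 2048 pointers per indirect block, i_block 5
 * maps to the one-entry path {5}, while i_block 15 maps to
 * {UFS_IND_BLOCK, 3}.
 */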
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;

        UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
        } else {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
        }
        return n;
}

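/*
 * One element of a lookup chain: @p points at the pointer slot we went
 * through (in the inode itself when @bh is NULL, otherwise inside the
 * indirect block held by @bh), and the union caches the value that
 * slot held when it was read, still in on-disk byte order.
 */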
typedef struct {
        void *p;
        union {
                __fs32 key32;
                __fs64 key64;
        };
        struct buffer_head *bh;
} Indirect;

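/*
 * Append one element to a lookup chain. The seqlock retry loop samples
 * the new key and re-checks every key collected so far against its
 * pointer slot, so a truncate running under ufsi->meta_lock cannot
 * slip a stale block number past us. Returns nonzero if the whole
 * chain is still valid; grow_chain64() below is the 64-bit (UFS2)
 * analogue.
 */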
static inline int grow_chain32(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs32 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key32 = *(__fs32 *)(to->p = v);
                for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs64 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key64 = *(__fs64 *)(to->p = v);
                for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}

/*
 * Returns the location of the fragment, counted from the beginning of
 * the filesystem, or 0 if the chain ends in a hole. If a concurrent
 * truncate invalidates a partially built chain, the walk is restarted.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        Indirect chain[4], *q = chain;
        unsigned *p;
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 res = 0;

        UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
             uspi->s_fpbshift, uspi->s_apbmask,
             (unsigned long long)mask);

        if (depth == 0)
                goto no_block;

again:
        p = offsets;

        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;

        if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
                goto changed;
        if (!q->key32)
                goto no_block;
        while (--depth) {
                __fs32 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                              fs32_to_cpu(sb, q->key32) + (n >> shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs32 *)bh->b_data + (n & mask);
                if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key32)
                        goto no_block;
        }
        res = fs32_to_cpu(sb, q->key32);
        goto found;

ufs2:
        if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
                goto changed;
        if (!q->key64)
                goto no_block;

        while (--depth) {
                __fs64 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                              fs64_to_cpu(sb, q->key64) + (n >> shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs64 *)bh->b_data + (n & mask);
                if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key64)
                        goto no_block;
        }
        res = fs64_to_cpu(sb, q->key64);
found:
        res += uspi->s_sbbase;
no_block:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        return res;

changed:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        goto again;
}

/*
 * Unpacking tails: we have a file with a partial final block and
 * have been asked to extend it. If the fragment being written to
 * is within the same block, we need to extend the tail just enough
 * to cover that fragment. Otherwise the tail is extended to a full
 * block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * tails.
 */
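/*
 * For example, assuming s_fpb == 8 fragments per block: with
 * i_lastfrag == 21 the tail holds 21 & 7 == 5 fragments. A write to
 * fragment 22 of the same block (22 < (21 | 7)) extends the tail to
 * (22 & 7) + 1 == 7 fragments, while a write at fragment 23 or beyond
 * extends it to the full 8.
 */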
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
                int *err, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned lastfrag = ufsi->i_lastfrag; /* it's a short file, so unsigned is enough */
        unsigned block = ufs_fragstoblks(lastfrag);
        unsigned new_size;
        void *p;
        u64 tmp;

        if (writes_to < (lastfrag | uspi->s_fpbmask))
                new_size = (writes_to & uspi->s_fpbmask) + 1;
        else
                new_size = uspi->s_fpb;

        p = ufs_get_direct_data_ptr(uspi, ufsi, block);
        tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
                                new_size - (lastfrag & uspi->s_fpbmask), err,
                                locked_page);
        return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set to the error code if something goes wrong
 * @new: set if a new fragment had to be allocated
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
                  sector_t new_fragment, int *err,
                  int *new, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 tmp, goal, lastfrag;
        unsigned nfrags = uspi->s_fpb;
        void *p;

        /* TODO: to be done for UFS2 write support
        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;
        */

        p = ufs_get_direct_data_ptr(uspi, ufsi, index);
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

        lastfrag = ufsi->i_lastfrag;

        /* will that be a new tail? */
        if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
                nfrags = (new_fragment & uspi->s_fpbmask) + 1;

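        /*
         * Goal-directed allocation: aim for the fragment right after the
         * block referenced by the previous direct pointer, if there is one.
         */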
        goal = 0;
        if (index) {
                goal = ufs_data_ptr_to_cpu(sb,
                                ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
                if (goal)
                        goal += uspi->s_fpb;
        }
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
                                goal, nfrags, err, locked_page);

        if (!tmp) {
                *err = -ENOSPC;
                return 0;
        }

        if (new)
                *new = 1;
        inode->i_ctime = current_time(inode);
        if (IS_SYNC(inode))
                ufs_sync_inode(inode);
        mark_inode_dirty(inode);
out:
        return tmp + uspi->s_sbbase;

        /* To be implemented: required only for write support, not for
           read-only access.
ufs2:

        u2_block = ufs_fragstoblks(fragment);
        u2_blockoff = ufs_fragnum(fragment);
        p = ufsi->i_u1.u2_i_data + block;
        goal = 0;

repeat2:
        tmp = fs32_to_cpu(sb, *p);
        lastfrag = ufsi->i_lastfrag;

        */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of the newly allocated fragment
 *  (the allocated block will hold this fragment and the other
 *  uspi->s_fpb - 1 fragments of its block)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
                   unsigned index, sector_t new_fragment, int *err,
                   int *new, struct page *locked_page)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        u64 tmp = 0, goal;
        struct buffer_head *bh;
        void *p;

        if (!ind_block)
                return 0;

        bh = sb_bread(sb, ind_block + (index >> shift));
        if (unlikely(!bh)) {
                *err = -EIO;
                return 0;
        }

        index &= uspi->s_apbmask >> uspi->s_fpbshift;
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + index;
        else
                p = (__fs32 *)bh->b_data + index;

        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

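        /*
         * Pick an allocation goal: right after the previous fragment in
         * this indirect block if one exists, otherwise right after the
         * indirect block itself.
         */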
        if (index && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index - 1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index - 1]))))
                goal = tmp + uspi->s_fpb;
        else
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_page);
        if (!tmp)
                goto out;

        if (new)
                *new = 1;

        mark_buffer_dirty(bh);
        if (IS_SYNC(inode))
                sync_dirty_buffer(bh);
        inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
out:
        brelse(bh);
        UFSD("EXIT\n");
        if (tmp)
                tmp += uspi->s_sbbase;
        return tmp;
}

/**
 * ufs_getfrag_block() - the get_block_t callback: the interface between
 * UFS and readpage, writepage and friends
 */

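/*
 * Locking: allocation is serialized against truncate by
 * UFS_I(inode)->truncate_mutex, and i_lastfrag is sampled under
 * read_seqlock_excl() on meta_lock; the read-only fast path relies on
 * ufs_frag_map() validating its chain against the same seqlock.
 */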
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int err = 0, new = 0;
        unsigned offsets[4];
        int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
        u64 phys64 = 0;
        unsigned frag = fragment & uspi->s_fpbmask;

        phys64 = ufs_frag_map(inode, offsets, depth);
        if (!create)
                goto done;

        if (phys64) {
                if (fragment >= UFS_NDIR_FRAGMENT)
                        goto done;
                read_seqlock_excl(&UFS_I(inode)->meta_lock);
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                        goto done;
                }
                read_sequnlock_excl(&UFS_I(inode)->meta_lock);
        }
        /* Only the write path gets here: we need to allocate fragments. */

        mutex_lock(&UFS_I(inode)->truncate_mutex);

        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
        if (unlikely(!depth)) {
                ufs_warning(sb, "ufs_getfrag_block", "block > big");
                err = -EIO;
                goto out;
        }

        if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
                unsigned lastfrag = UFS_I(inode)->i_lastfrag;
                unsigned tailfrags = lastfrag & uspi->s_fpbmask;
                if (tailfrags && fragment >= lastfrag) {
                        if (!ufs_extend_tail(inode, fragment,
                                             &err, bh_result->b_page))
                                goto out;
                }
        }

        if (depth == 1) {
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, &new, bh_result->b_page);
        } else {
                int i;
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, NULL, NULL);
                for (i = 1; i < depth - 1; i++)
                        phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
                                                    fragment, &err, NULL, NULL);
                phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
                                            fragment, &err, &new, bh_result->b_page);
        }
out:
        if (phys64) {
                phys64 += frag;
                map_bh(bh_result, sb, phys64);
                if (new)
                        set_buffer_new(bh_result);
        }
        mutex_unlock(&UFS_I(inode)->truncate_mutex);
        return err;

done:
        if (phys64)
                map_bh(bh_result, sb, phys64 + frag);
        return 0;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
        return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

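/*
 * If a write could not be completed, anything instantiated beyond the
 * old end of file must be trimmed again: drop the pagecache past
 * i_size and free the blocks allocated for the failed write.
 */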
static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                ufs_truncate_blocks(inode);
        }
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned flags,
                           struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                ufs_getfrag_block);
        if (unlikely(ret))
                ufs_write_failed(mapping, pos + len);

        return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
                         loff_t pos, unsigned len, unsigned copied,
                         struct page *page, void *fsdata)
{
        int ret;

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret < len)
                ufs_write_failed(mapping, pos + len);
        return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
        .write_begin = ufs_write_begin,
        .write_end = ufs_write_end,
        .bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks) {
                        inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
                        inode->i_op = &simple_symlink_inode_operations;
                } else {
                        inode->i_mapping->a_ops = &ufs_aops;
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                }
        } else {
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
        }
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
        i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
        inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
        inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
        i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
        inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
        inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
        inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
        inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
        inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /*
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
                       sizeof(ufs2_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
                       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

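/*
 * Look the inode up in the inode cache; on a miss, read the on-disk
 * inode (UFS1 or UFS2 dinode layout, as the superblock dictates),
 * fill in the in-core fields derived from it and hook up the
 * inode/file/address-space operations.
 */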
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
        struct ufs_inode_info *ufsi;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *bh;
        struct inode *inode;
        int err = -EIO;

        UFSD("ENTER, ino %lu\n", ino);

        if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_iget", "bad inode number (%lu)\n",
                            ino);
                return ERR_PTR(-EIO);
        }

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ufsi = UFS_I(inode);

        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_iget", "unable to read inode %lu\n",
                            inode->i_ino);
                goto bad_inode;
        }
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }
        brelse(bh);
        if (err)
                goto bad_inode;

        inode_inc_iversion(inode);
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufsi->i_osync = 0;

        ufs_set_inode_ops(inode);

        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(err);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct ufs_inode_info *ufsi = UFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ufs_inode->ui_atime.tv_usec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) ufs_inode->ui_ctime.tv_usec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ufs_inode->ui_mtime.tv_usec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) } else if (inode->i_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) sizeof(ufs_inode->ui_u2.ui_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) sizeof(ufs_inode->ui_u2.ui_symlink));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (!inode->i_nlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) memset (ufs_inode, 0, sizeof(struct ufs_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct ufs_inode_info *ufsi = UFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) UFSD("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) } else if (inode->i_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) sizeof(ufs_inode->ui_u2.ui_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) sizeof(ufs_inode->ui_u2.ui_symlink));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) UFSD("EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
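/*
 * Write the in-core inode back to disk: read the filesystem block that
 * holds the inode, update the UFS1 or UFS2 image in place (depending
 * on the superblock magic) and mark the buffer dirty, syncing it
 * immediately when do_sync is set.
 */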
static int ufs_update_inode(struct inode *inode, int do_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct buffer_head * bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) UFSD("ENTER, ino %lu\n", inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (inode->i_ino < UFS_ROOTINO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_update_inode",
			    "bad inode number (%lu)\n", inode->i_ino);
		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (!bh) {
		ufs_warning(sb, "ufs_update_inode",
			    "unable to read inode %lu\n", inode->i_ino);
		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (uspi->fs_magic == UFS2_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ufs2_update_inode(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ufs2_inode + ufs_inotofsbo(inode->i_ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (do_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) sync_dirty_buffer(bh);
	brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) UFSD("EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
int ufs_sync_inode(struct inode *inode)
{
	return ufs_update_inode(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
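/*
 * Final disposal of an inode.  When the last link is gone (and the
 * inode is not a bad one), its data blocks are truncated away, the
 * zeroed on-disk inode is written back, and the inode is returned to
 * the free inode map.
 */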
void ufs_evict_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) int want_delete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!inode->i_nlink && !is_bad_inode(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) want_delete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) truncate_inode_pages_final(&inode->i_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (want_delete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) inode->i_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (inode->i_blocks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) S_ISLNK(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ufs_truncate_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ufs_update_inode(inode, inode_needs_sync(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) invalidate_inode_buffers(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) clear_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (want_delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ufs_free_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
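/*
 * Deferred-free context: free_data() accumulates runs of physically
 * contiguous fragments and hands each completed run to
 * ufs_free_blocks() in one call.  Passing count == 0 flushes whatever
 * has been collected so far.
 */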
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct to_free {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u64 to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) unsigned count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (ctx->count && ctx->to != from) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ctx->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ctx->count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ctx->to = from + count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
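/* Index of the first fragment not covered by i_size (rounded up). */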
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
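/*
 * Free the direct fragments beyond the new EOF.  The range splits into
 * a partial block at the head (frag1..frag2), a run of whole blocks
 * (block1..block2) and the tail fragments of the last allocated block
 * (frag3..frag4).
 */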
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static void ufs_trunc_direct(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) u64 frag1, frag2, frag3, frag4, block1, block2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct to_free ctx = {.inode = inode};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) unsigned i, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) UFSD("ENTER: ino %lu\n", inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) frag1 = DIRECT_FRAGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) frag3 = frag4 & ~uspi->s_fpbmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) block1 = block2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (frag2 > frag3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) frag2 = frag4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) frag3 = frag4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) } else if (frag2 < frag3) {
		block1 = ufs_fragstoblks(frag2);
		block2 = ufs_fragstoblks(frag3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) " frag3 %llu, frag4 %llu\n", inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) (unsigned long long)frag1, (unsigned long long)frag2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) (unsigned long long)block1, (unsigned long long)block2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) (unsigned long long)frag3, (unsigned long long)frag4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (frag1 >= frag2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) goto next1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
	/*
	 * Free the partial block at the head of the range
	 * (fragments frag1..frag2)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum(frag1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ufs_free_fragments(inode, tmp + frag1, frag2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) next1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Free whole blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) */
	for (i = block1; i < block2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) p = ufs_get_direct_data_ptr(uspi, ufsi, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) tmp = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) write_seqlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) write_sequnlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) free_data(&ctx, tmp, uspi->s_fpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) free_data(&ctx, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (frag3 >= frag4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto next3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
	/*
	 * Free the tail fragments in the last allocated block
	 * (fragments frag3..frag4)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum(frag4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) write_seqlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) write_sequnlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
	ufs_free_fragments(inode, tmp, frag4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) next3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) UFSD("EXIT: ino %lu\n", inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
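/*
 * Free a whole indirect branch rooted at ind_block: recurse into the
 * lower levels (or batch-free the data blocks once depth reaches the
 * leaves), then release the indirect block itself.
 */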
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (!ubh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (--depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (i = 0; i < uspi->s_apb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) void *p = ubh_get_data_ptr(uspi, ubh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) u64 block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) free_full_branch(inode, block, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct to_free ctx = {.inode = inode};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) for (i = 0; i < uspi->s_apb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) void *p = ubh_get_data_ptr(uspi, ubh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) u64 block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) free_data(&ctx, block, uspi->s_fpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) free_data(&ctx, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ubh_bforget(ubh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ufs_free_blocks(inode, ind_block, uspi->s_fpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
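/*
 * Partially empty an indirect block that survives the truncation:
 * pointers from index 'from' onwards are cleared under meta_lock and
 * their branches freed, while earlier entries and the indirect block
 * itself are kept.
 */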
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) void *p = ubh_get_data_ptr(uspi, ubh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) u64 block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) write_seqlock(&UFS_I(inode)->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) write_sequnlock(&UFS_I(inode)->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ubh_mark_buffer_dirty(ubh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) free_full_branch(inode, block, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct to_free ctx = {.inode = inode};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) for (i = from; i < uspi->s_apb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) void *p = ubh_get_data_ptr(uspi, ubh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) u64 block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) write_seqlock(&UFS_I(inode)->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) write_sequnlock(&UFS_I(inode)->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ubh_mark_buffer_dirty(ubh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) free_data(&ctx, block, uspi->s_fpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) free_data(&ctx, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ubh_sync_block(ubh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ubh_brelse(ubh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
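/*
 * Allocate the fragment that will hold the last byte at the new size.
 * In the indirect range allocation is done a whole block at a time, so
 * the fragments of that block past the last one are zeroed on disk as
 * well.
 */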
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) unsigned i, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) sector_t lastfrag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct page *lastpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u64 phys64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!lastfrag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) lastfrag--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) lastpage = ufs_get_locked_page(mapping, lastfrag >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) (PAGE_SHIFT - inode->i_blkbits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (IS_ERR(lastpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) bh = page_buffers(lastpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) for (i = 0; i < end; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err = ufs_getfrag_block(inode, lastfrag, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (buffer_new(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) clear_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) clean_bdev_bh_alias(bh);
		/*
		 * No need to zero the fragment: if it was mapped to a
		 * hole, it already contains zeroes.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) set_page_dirty(lastpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
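	/*
	 * Past the direct area a whole block has been allocated;
	 * zero its remaining fragments on disk so they do not carry
	 * stale data.
	 */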
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (lastfrag >= UFS_IND_FRAGMENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) phys64 = bh->b_blocknr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) for (i = 0; i < end; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) bh = sb_getblk(sb, i + phys64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) memset(bh->b_data, 0, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) sync_dirty_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ufs_put_locked_page(lastpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
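/*
 * Free all blocks past i_size.  The direct blocks are trimmed first,
 * then the path leading to the last in-use block is walked down and
 * each partially emptied indirect block has its tail freed, and
 * finally any completely unused indirect trees are released.
 */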
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void ufs_truncate_blocks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct ufs_inode_info *ufsi = UFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) unsigned offsets[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int depth2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct ufs_buffer_head *ubh[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) u64 block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) depth = ufs_block_to_path(inode, last, offsets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (!depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) depth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
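	/*
	 * Trailing path components that sit on the last slot of their
	 * indirect block need no partial emptying; depth2 is the
	 * deepest level that does.
	 */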
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) for (depth2 = depth - 1; depth2; depth2--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (offsets[depth2] != uspi->s_apb - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) mutex_lock(&ufsi->truncate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (depth == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ufs_trunc_direct(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) offsets[0] = UFS_IND_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* get the blocks that should be partially emptied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) for (i = 0; i < depth2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!ubh[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) write_seqlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) write_sequnlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) p = ufs_get_direct_data_ptr(uspi, ufsi, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) block = ufs_data_ptr_to_cpu(sb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) write_seqlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ufs_data_ptr_clear(uspi, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) write_sequnlock(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) read_seqlock_excl(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ufsi->i_lastfrag = DIRECT_FRAGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) read_sequnlock_excl(&ufsi->meta_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) mutex_unlock(&ufsi->truncate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
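/*
 * Change the file size.  The block holding the new last byte is
 * allocated up front and the partial tail zeroed through
 * block_truncate_page() before i_size is updated; surplus blocks are
 * freed afterwards.
 */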
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int ufs_truncate(struct inode *inode, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) inode->i_ino, (unsigned long long)size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) (unsigned long long)i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) S_ISLNK(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) err = ufs_alloc_lastblock(inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) truncate_setsize(inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ufs_truncate_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) UFSD("EXIT: err %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
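/*
 * ->setattr() for UFS: a size change is routed through ufs_truncate(),
 * every other attribute is copied into the in-core inode and written
 * back lazily via mark_inode_dirty().
 */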
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int ufs_setattr(struct dentry *dentry, struct iattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) unsigned int ia_valid = attr->ia_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) error = setattr_prepare(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) error = ufs_truncate(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) setattr_copy(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) const struct inode_operations ufs_file_inode_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) .setattr = ufs_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) };