/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/fs.h>
#include "reiserfs.h"
#include "acl.h"
#include "xattr.h"
#include <linux/exportfs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/bio.h>

int reiserfs_commit_write(struct file *f, struct page *page,
			  unsigned from, unsigned to);

void reiserfs_evict_inode(struct inode *inode)
{
	/*
	 * We need blocks for transaction + (user+group) quota
	 * update (possibly delete)
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 2 +
	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
	struct reiserfs_transaction_handle th;
	int err;

	if (!inode->i_nlink && !is_bad_inode(inode))
		dquot_initialize(inode);

	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_nlink)
		goto no_delete;

	/*
	 * An objectid of 0 happens when we abort creating a new inode
	 * for some reason, such as lack of space.
	 * This also handles the bad_inode case.
	 */
	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {

		reiserfs_delete_xattrs(inode);

		reiserfs_write_lock(inode->i_sb);

		if (journal_begin(&th, inode->i_sb, jbegin_count))
			goto out;
		reiserfs_update_inode_transaction(inode);

		reiserfs_discard_prealloc(&th, inode);

		err = reiserfs_delete_object(&th, inode);

		/*
		 * Do quota update inside a transaction for journaled quotas.
		 * We must do that after delete_object so that quota updates
		 * go into the same transaction as stat data deletion
		 */
		if (!err) {
			int depth = reiserfs_write_unlock_nested(inode->i_sb);
			dquot_free_inode(inode);
			reiserfs_write_lock_nested(inode->i_sb, depth);
		}

		if (journal_end(&th))
			goto out;

		/*
		 * check return value from reiserfs_delete_object after
		 * ending the transaction
		 */
		if (err)
			goto out;

		/*
		 * All items of the file are deleted, so we can remove the
		 * "save" link; we can't do anything about an error here.
		 */
		remove_save_link(inode, 0 /* not truncate */);
out:
		reiserfs_write_unlock(inode->i_sb);
	} else {
		/* no object items are in the tree */
		;
	}

	/* note this must go after the journal_end to prevent deadlock */
	clear_inode(inode);

	dquot_drop(inode);
	inode->i_blocks = 0;
	return;

no_delete:
	clear_inode(inode);
	dquot_drop(inode);
}

static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
			  __u32 objectid, loff_t offset, int type, int length)
{
	key->version = version;

	key->on_disk_key.k_dir_id = dirid;
	key->on_disk_key.k_objectid = objectid;
	set_cpu_key_k_offset(key, offset);
	set_cpu_key_k_type(key, type);
	key->key_length = length;
}

/*
 * take the key base (dirid, objectid) and the version from the inode
 * (they always come from the inode), and set the offset and type of
 * the key
 */
void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
		  int type, int length)
{
	_make_cpu_key(key, get_inode_item_key_version(inode),
		      le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
		      le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
		      length);
}

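/*
 * Example usage (a sketch, mirroring the callers later in this file):
 * to build the key for the first byte of a file with a 3-component key:
 *
 *	struct cpu_key key;
 *
 *	make_cpu_key(&key, inode, 1, TYPE_ANY, 3);
 *
 * Key offsets here are 1-based byte offsets into the file.
 */
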
/* when key is 0, do not set version and short key */
inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
			      int version,
			      loff_t offset, int type, int length,
			      int entry_count /* or ih_free_space */)
{
	if (key) {
		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
		ih->ih_key.k_objectid =
		    cpu_to_le32(key->on_disk_key.k_objectid);
	}
	put_ih_version(ih, version);
	set_le_ih_k_offset(ih, offset);
	set_le_ih_k_type(ih, type);
	put_ih_item_len(ih, length);
	/* set_ih_free_space (ih, 0); */
	/*
	 * for directory items this is the entry count, for direct items
	 * and stat data - 0xffff, for indirect items - 0
	 */
	put_ih_entry_count(ih, entry_count);
}

/*
 * FIXME: we might cache recently accessed indirect item
 * Ugh. Not too eager for that....
 * I cut the code until such time as I see a convincing argument (benchmark).
 * I don't want a bloated inode struct..., and I don't like code complexity....
 */

/*
 * cutting the code is fine, since it really isn't in use yet and is easy
 * to add back in. But, Vladimir has a really good idea here. Think
 * about what happens for reading a file. For each page,
 * The VFS layer calls reiserfs_readpage, who searches the tree to find
 * an indirect item. This indirect item has X number of pointers, where
 * X is a big number if we've done the block allocation right. But,
 * we only use one or two of these pointers during each call to readpage,
 * needlessly researching again later on.
 *
 * The size of the cache could be dynamic based on the size of the file.
 *
 * I'd also like to see us cache the location of the stat data item, since
 * we are needlessly researching for that frequently.
 *
 * --chris
 */

/*
 * If this page has a file tail in it, and
 * it was read in by get_block_create_0, the page data is valid,
 * but the tail is still sitting in a direct item, and we can't write to
 * it. So, look through this page, and check all the mapped buffers
 * to make sure they have valid block numbers. Any that don't must be
 * unmapped, so that __block_write_begin will correctly call
 * reiserfs_get_block to convert the tail into an unformatted node
 */
static inline void fix_tail_page_for_writing(struct page *page)
{
	struct buffer_head *head, *next, *bh;

	if (page && page_has_buffers(page)) {
		head = page_buffers(page);
		bh = head;
		do {
			next = bh->b_this_page;
			if (buffer_mapped(bh) && bh->b_blocknr == 0)
				reiserfs_unmap_buffer(bh);
			bh = next;
		} while (bh != head);
	}
}

/*
 * reiserfs_get_block does not need to allocate a block if one has
 * already been allocated or a non-hole position has been found in the
 * indirect item
 */
static inline int allocation_needed(int retval, b_blocknr_t allocated,
				    struct item_head *ih,
				    __le32 *item, int pos_in_item)
{
	if (allocated)
		return 0;
	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
	    get_block_num(item, pos_in_item))
		return 0;
	return 1;
}

static inline int indirect_item_found(int retval, struct item_head *ih)
{
	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
}

static inline void set_block_dev_mapped(struct buffer_head *bh,
					b_blocknr_t block, struct inode *inode)
{
	map_bh(bh, inode->i_sb, block);
}

/*
 * files which were created with the earlier (3.5) key format cannot be
 * longer than 2 GB
 */
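/*
 * A sketch of the arithmetic in the check below, assuming a 4 KiB block
 * size: s_blocksize_bits is 12, so the limit is
 * 1 << (31 - 12) = 524288 blocks, i.e. exactly 2 GiB.
 */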
static int file_capable(struct inode *inode, sector_t block)
{
	/* it is a new-format file */
	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
	    /* old file, but 'block' is within 2 GB */
	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
		return 1;

	return 0;
}

static int restart_transaction(struct reiserfs_transaction_handle *th,
			       struct inode *inode, struct treepath *path)
{
	struct super_block *s = th->t_super;
	int err;

	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_refcount);

	pathrelse(path);

	/* we cannot restart while nested */
	if (th->t_refcount > 1)
		return 0;

	reiserfs_update_sd(th, inode);
	err = journal_end(th);
	if (!err) {
		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
		if (!err)
			reiserfs_update_inode_transaction(inode);
	}
	return err;
}

/*
 * Called by reiserfs_get_block when create == 0. Returns the block
 * number for the 'block'-th logical block of the file. When it hits a
 * direct item it either returns 0 (when called from bmap) or reads the
 * direct item into a piece of the page (bh_result).
 */
static int _get_block_create_0(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int args)
{
	INITIALIZE_PATH(path);
	struct cpu_key key;
	struct buffer_head *bh;
	struct item_head *ih, tmp_ih;
	b_blocknr_t blocknr;
	char *p = NULL;
	int chars;
	int ret;
	int result;
	int done = 0;
	unsigned long offset;

	/* prepare the key to look for the 'block'-th block of file */
	make_cpu_key(&key, inode,
		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
		     3);
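	/*
	 * Note on the "+ 1" above: reiserfs key offsets are 1-based byte
	 * offsets, so with a 4 KiB block size, block 0 maps to key offset 1
	 * and block 1 to key offset 4097.
	 */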

	result = search_for_position_by_key(inode->i_sb, &key, &path);
	if (result != POSITION_FOUND) {
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		if (result == IO_ERROR)
			return -EIO;
		/*
		 * We do not return -ENOENT if there is a hole but page is
		 * uptodate, because it means that there is some MMAPED data
		 * associated with it that is yet to be written to disk.
		 */
		if ((args & GET_BLOCK_NO_HOLE)
		    && !PageUptodate(bh_result->b_page)) {
			return -ENOENT;
		}
		return 0;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	if (is_indirect_le_ih(ih)) {
		__le32 *ind_item = (__le32 *) ih_item_body(bh, ih);

		/*
		 * FIXME: here we could cache indirect item or part of it in
		 * the inode to avoid search_by_key in case of subsequent
		 * access to file
		 */
		blocknr = get_block_num(ind_item, path.pos_in_item);
		ret = 0;
		if (blocknr) {
			map_bh(bh_result, inode->i_sb, blocknr);
			if (path.pos_in_item ==
			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
				set_buffer_boundary(bh_result);
			}
		} else
			/*
			 * We do not return -ENOENT if there is a hole but
			 * page is uptodate, because it means that there is
			 * some MMAPED data associated with it that is
			 * yet to be written to disk.
			 */
			if ((args & GET_BLOCK_NO_HOLE)
			    && !PageUptodate(bh_result->b_page)) {
				ret = -ENOENT;
			}

		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return ret;
	}
	/* requested data are in direct item(s) */
	if (!(args & GET_BLOCK_READ_DIRECT)) {
		/*
		 * we are called by bmap. FIXME: we cannot map a block of
		 * the file when it is stored in direct item(s)
		 */
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return -ENOENT;
	}

	/*
	 * if we've got a direct item, and the buffer or page was uptodate,
	 * we don't want to pull data off disk again. skip to the
	 * end, where we map the buffer and return
	 */
	if (buffer_uptodate(bh_result)) {
		goto finished;
	} else
		/*
		 * grab_tail_page can trigger calls to reiserfs_get_block on
		 * up to date pages without any buffers. If the page is up
		 * to date, we don't want to read old data off disk. Set the
		 * up to date bit on the buffer instead and jump to the end
		 */
	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
		set_buffer_uptodate(bh_result);
		goto finished;
	}
	/* read file tail into part of page */
	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
	copy_item_head(&tmp_ih, ih);

	/*
	 * we only want to kmap if we are reading the tail into the page.
	 * this is not the common case, so we don't kmap until we are
	 * sure we need to. But, this means the item might move if
	 * kmap schedules
	 */
	if (!p)
		p = (char *)kmap(bh_result->b_page);

	p += offset;
	memset(p, 0, inode->i_sb->s_blocksize);
	do {
		if (!is_direct_le_ih(ih))
			BUG();
		/*
		 * make sure we don't read more bytes than actually exist in
		 * the file. This can happen in odd cases where i_size isn't
		 * correct, and when direct item padding results in a few
		 * extra bytes at the end of the direct item
		 */
		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
			break;
		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
			chars =
			    inode->i_size - (le_ih_k_offset(ih) - 1) -
			    path.pos_in_item;
			done = 1;
		} else {
			chars = ih_item_len(ih) - path.pos_in_item;
		}
		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);

		if (done)
			break;

		p += chars;

		/*
		 * we are done if the direct item we just read is not the
		 * last item of the node. FIXME: we could check the right
		 * delimiting key to see whether the direct item continues
		 * in the right neighbor, or rely on i_size
		 */
		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
			break;

		/* update key to look for the next piece */
		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
		result = search_for_position_by_key(inode->i_sb, &key, &path);
		if (result != POSITION_FOUND)
			/* i/o error most likely */
			break;
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
	} while (1);

	flush_dcache_page(bh_result->b_page);
	kunmap(bh_result->b_page);

finished:
	pathrelse(&path);

	if (result == IO_ERROR)
		return -EIO;

	/*
	 * this buffer has valid data, but isn't valid for io. mapping it to
	 * block #0 tells the rest of reiserfs it just has a tail in it
	 */
	map_bh(bh_result, inode->i_sb, 0);
	set_buffer_uptodate(bh_result);
	return 0;
}

/*
 * this is called to create the file map, so _get_block_create_0 will not
 * read a direct item
 */
static int reiserfs_bmap(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	if (!file_capable(inode, block))
		return -EFBIG;

	reiserfs_write_lock(inode->i_sb);
	/* do not read the direct item */
	_get_block_create_0(inode, block, bh_result, 0);
	reiserfs_write_unlock(inode->i_sb);
	return 0;
}

/*
 * special version of get_block that is only used by grab_tail_page right
 * now. It is sent to __block_write_begin, and when you try to get a
 * block past the end of the file (or a block from a hole) it returns
 * -ENOENT instead of a valid buffer. __block_write_begin expects to
 * be able to do i/o on the buffers returned, unless an error value
 * is also returned.
 *
 * So, this allows __block_write_begin to be used for reading a single block
 * in a page, where it does not produce a valid page for holes, or past the
 * end of the file. This turns out to be exactly what we need for reading
 * tails for conversion.
 *
 * The point of the wrapper is forcing a certain value for create, even
 * though the VFS layer is calling this function with create == 1. If you
 * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 * don't use this function.
 */
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
				       struct buffer_head *bh_result,
				       int create)
{
	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}

/*
 * This is a special helper for reiserfs_get_block in case we are
 * executing a direct_IO request.
 */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
					 sector_t iblock,
					 struct buffer_head *bh_result,
					 int create)
{
	int ret;

	bh_result->b_page = NULL;

	/*
	 * We set the b_size before reiserfs_get_block call since it is
	 * referenced in convert_tail_for_hole() that may be called from
	 * reiserfs_get_block()
	 */
	bh_result->b_size = i_blocksize(inode);

	ret = reiserfs_get_block(inode, iblock, bh_result,
				 create | GET_BLOCK_NO_DANGLE);
	if (ret)
		goto out;

	/* don't allow direct io onto tail pages */
	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
		/*
		 * make sure future calls to the direct io funcs for this
		 * offset in the file fail by unmapping the buffer
		 */
		clear_buffer_mapped(bh_result);
		ret = -EINVAL;
	}

	/*
	 * Possible unpacked tail. Flush the data before pages have
	 * disappeared
	 */
	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
		int err;

		reiserfs_write_lock(inode->i_sb);

		err = reiserfs_commit_for_inode(inode);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

		reiserfs_write_unlock(inode->i_sb);

		if (err < 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * helper function for when reiserfs_get_block is called for a hole
 * but the file tail is still in a direct item
 * bh_result is the buffer head for the hole
 * tail_offset is the offset of the start of the tail in the file
 *
 * This calls prepare_write, which will start a new transaction; you
 * should not be in a transaction, or have any paths held, when you
 * call this.
 */
static int convert_tail_for_hole(struct inode *inode,
				 struct buffer_head *bh_result,
				 loff_t tail_offset)
{
	unsigned long index;
	unsigned long tail_end;
	unsigned long tail_start;
	struct page *tail_page;
	struct page *hole_page = bh_result->b_page;
	int retval = 0;

	if ((tail_offset & (bh_result->b_size - 1)) != 1)
		return -EIO;

	/* always try to read until the end of the block */
	tail_start = tail_offset & (PAGE_SIZE - 1);
	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
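	/*
	 * Worked example of the masking above, assuming 4 KiB blocks and
	 * pages: a tail whose first byte is at file offset 4096 has
	 * tail_offset 4097 (key offsets are 1-based), the sanity check
	 * above passes since 4097 & 4095 == 1, and we get tail_start = 1
	 * and tail_end = (1 | 4095) + 1 = 4096.
	 */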

	index = tail_offset >> PAGE_SHIFT;
	/*
	 * hole_page can be NULL in case of direct_io; we are sure that we
	 * cannot get here if we write with O_DIRECT into a tail page
	 */
	if (!hole_page || index != hole_page->index) {
		tail_page = grab_cache_page(inode->i_mapping, index);
		retval = -ENOMEM;
		if (!tail_page)
			goto out;
	} else {
		tail_page = hole_page;
	}

	/*
	 * we don't have to make sure the conversion did not happen while
	 * we were locking the page because anyone that could convert
	 * must first take i_mutex.
	 *
	 * We must fix the tail page for writing because it might have buffers
	 * that are mapped, but have a block number of 0. This indicates tail
	 * data that has been read directly into the page, and
	 * __block_write_begin won't trigger a get_block in this case.
	 */
	fix_tail_page_for_writing(tail_page);
	retval = __reiserfs_write_begin(tail_page, tail_start,
					tail_end - tail_start);
	if (retval)
		goto unlock;

	/* tail conversion might change the data in the page */
	flush_dcache_page(tail_page);

	retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);

unlock:
	if (tail_page != hole_page) {
		unlock_page(tail_page);
		put_page(tail_page);
	}
out:
	return retval;
}

static inline int _allocate_block(struct reiserfs_transaction_handle *th,
				  sector_t block,
				  struct inode *inode,
				  b_blocknr_t *allocated_block_nr,
				  struct treepath *path, int flags)
{
	BUG_ON(!th->t_trans_id);

#ifdef REISERFS_PREALLOCATE
	if (!(flags & GET_BLOCK_NO_IMUX)) {
		return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
						  path, block);
	}
#endif
	return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
					 block);
}

int reiserfs_get_block(struct inode *inode, sector_t block,
		       struct buffer_head *bh_result, int create)
{
	int repeat, retval = 0;
	/* b_blocknr_t is (unsigned) 32 bit int */
	b_blocknr_t allocated_block_nr = 0;
	INITIALIZE_PATH(path);
	int pos_in_item;
	struct cpu_key key;
	struct buffer_head *bh, *unbh = NULL;
	struct item_head *ih, tmp_ih;
	__le32 *item;
	int done;
	int fs_gen;
	struct reiserfs_transaction_handle *th = NULL;
	/*
	 * space reserved in transaction batch:
	 * . 3 balancings in direct->indirect conversion
	 * . 1 block involved into reiserfs_update_sd()
	 * XXX in practically impossible worst case direct2indirect()
	 * can incur (much) more than 3 balancings.
	 * quota update for user, group
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
	int version;
	int dangle = 1;
	loff_t new_offset =
	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;

	reiserfs_write_lock(inode->i_sb);
	version = get_inode_item_key_version(inode);

	if (!file_capable(inode, block)) {
		reiserfs_write_unlock(inode->i_sb);
		return -EFBIG;
	}

	/*
	 * if !create, we aren't changing the FS, so we don't need to
	 * log anything, so we don't need to start a transaction
	 */
	if (!(create & GET_BLOCK_CREATE)) {
		int ret;
		/* find the 'block'-th logical block of the file */
		ret = _get_block_create_0(inode, block, bh_result,
					  create | GET_BLOCK_READ_DIRECT);
		reiserfs_write_unlock(inode->i_sb);
		return ret;
	}

	/*
	 * if we're already in a transaction, make sure to close
	 * any new transactions we start in this func
	 */
	if ((create & GET_BLOCK_NO_DANGLE) ||
	    reiserfs_transaction_running(inode->i_sb))
		dangle = 0;

	/*
	 * If the file is of such a size that it might have a tail and
	 * tails are enabled, we should mark it as possibly needing
	 * tail packing on close
	 */
	if ((have_large_tails(inode->i_sb)
	     && inode->i_size < i_block_size(inode) * 4)
	    || (have_small_tails(inode->i_sb)
		&& inode->i_size < i_block_size(inode)))
		REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
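
	/*
	 * For example, with a 4 KiB block size the check above marks files
	 * smaller than 16 KiB when large tails are enabled, or smaller
	 * than 4 KiB when small tails are enabled.
	 */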

	/* set the key of the first byte in the 'block'-th block of file */
	make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /* key length */);
	if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
start_trans:
		th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
		if (!th) {
			retval = -ENOMEM;
			goto failure;
		}
		reiserfs_update_inode_transaction(inode);
	}
research:

	retval = search_for_position_by_key(inode->i_sb, &key, &path);
	if (retval == IO_ERROR) {
		retval = -EIO;
		goto failure;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	item = tp_item_body(&path);
	pos_in_item = path.pos_in_item;

	fs_gen = get_generation(inode->i_sb);
	copy_item_head(&tmp_ih, ih);

	if (allocation_needed
	    (retval, allocated_block_nr, ih, item, pos_in_item)) {
		/* we have to allocate a block for the unformatted node */
		if (!th) {
			pathrelse(&path);
			goto start_trans;
		}

		repeat =
		    _allocate_block(th, block, inode, &allocated_block_nr,
				    &path, create);

		/*
		 * restart the transaction to give the journal a chance to free
		 * some blocks. Releases the path, so we have to go back to
		 * research if we succeed on the second try
		 */
		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
			retval = restart_transaction(th, inode, &path);
			if (retval)
				goto failure;
			repeat =
			    _allocate_block(th, block, inode,
					    &allocated_block_nr, NULL, create);

			if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
				goto research;
			}
			if (repeat == QUOTA_EXCEEDED)
				retval = -EDQUOT;
			else
				retval = -ENOSPC;
			goto failure;
		}

		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			goto research;
		}
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (indirect_item_found(retval, ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) b_blocknr_t unfm_ptr;
/*
 * the 'block'-th block is in the file already (there is a
 * corresponding cell in some indirect item), but it may be a
 * zero unformatted node pointer, i.e. a hole
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unfm_ptr = get_block_num(item, pos_in_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (unfm_ptr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* use allocated block to plug the hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (fs_changed(fs_gen, inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) && item_moved(&tmp_ih, &path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) reiserfs_restore_prepared_buffer(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) goto research;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (buffer_dirty(bh_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) && reiserfs_data_ordered(inode->i_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) reiserfs_add_ordered_list(inode, bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) put_block_num(item, pos_in_item, allocated_block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unfm_ptr = allocated_block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) journal_mark_dirty(th, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) reiserfs_update_sd(th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) set_block_dev_mapped(bh_result, unfm_ptr, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!dangle && th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) retval = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
/*
 * the item was found, so new blocks were not added to the file.
 * There is no need to make sure the inode is updated with this
 * transaction
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!th) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) goto start_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
/*
 * desired position is not found or is in the direct item. We
 * have to append the file with holes up to the 'block'-th block,
 * converting direct items to an indirect one if necessary
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (is_statdata_le_ih(ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) __le32 unp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct cpu_key tmp_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* indirect item has to be inserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) make_le_item_head(&tmp_ih, &key, version, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) TYPE_INDIRECT, UNFM_P_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 0 /* free_space */ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * we are going to add 'block'-th block to the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * Use allocated block for that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (cpu_key_k_offset(&key) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) unp = cpu_to_le32(allocated_block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) set_block_dev_mapped(bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) allocated_block_nr, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
tmp_key = key;	/* struct copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) set_cpu_key_k_offset(&tmp_key, 1);
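/*
 * the search landed on the stat data item; advance the path
 * position so the new indirect item is inserted just after it
 */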
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) PATH_LAST_POSITION(&path)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) inode, (char *)&unp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) reiserfs_free_block(th, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) allocated_block_nr, 1);
/* retval is -ENOSPC, -EDQUOT, -EIO or -EEXIST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) } else if (is_direct_le_ih(ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* direct item has to be converted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) loff_t tail_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) tail_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ((le_ih_k_offset(ih) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
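/*
 * key offsets are 1-based: the expression above converts to
 * 0-based, rounds down to the block boundary, and converts back,
 * giving the offset of the first byte of the tail's block
 */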
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
/*
 * the direct item we just found fits into the block we have
 * to map. Convert it into an unformatted node, using
 * bh_result for the conversion
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (tail_offset == cpu_key_k_offset(&key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) set_block_dev_mapped(bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) allocated_block_nr, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unbh = bh_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
/*
 * we have to pad the file tail stored in direct
 * item(s) up to the block size and convert it
 * to an unformatted node. FIXME: this should
 * also get into the page cache
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * ugly, but we can only end the transaction if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * we aren't nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) BUG_ON(!th->t_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (th->t_refcount == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) reiserfs_end_persistent_transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) (th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) th = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) convert_tail_for_hole(inode, bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tail_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (retval != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) reiserfs_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) "clm-6004",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) "convert tail failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) "inode %lu, error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (allocated_block_nr) {
/*
 * the transaction needs 3 blocks: the bitmap,
 * the super block, and the stat data
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) th = reiserfs_persistent_transaction(inode->i_sb, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) reiserfs_free_block(th,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) allocated_block_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) goto research;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) direct2indirect(th, inode, &path, unbh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) tail_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) reiserfs_unmap_buffer(unbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) reiserfs_free_block(th, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) allocated_block_nr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
/*
 * it is important that set_buffer_uptodate is done
 * after the direct2indirect. The buffer might
 * contain valid data newer than the data on disk
 * (read by readpage, changed, and then sent here by
 * writepage). direct2indirect needs to know if unbh
 * was already up to date, so it can decide if the
 * data in unbh needs to be replaced with data from
 * the disk
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) set_buffer_uptodate(unbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
/*
 * unbh->b_page == NULL in case of a DIRECT_IO request;
 * this means the buffer will disappear shortly, so it
 * should not be added to the tail list below
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (unbh->b_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * we've converted the tail, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * flush unbh before the transaction commits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) reiserfs_add_tail_list(inode, unbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * mark it dirty now to prevent commit_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * from adding this buffer to the inode's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * dirty buffer list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * AKPM: changed __mark_buffer_dirty to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * mark_buffer_dirty(). It's still atomic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * but it sets the page dirty too, which makes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * it eligible for writeback at any time by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * VM (which was also the case with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * __mark_buffer_dirty())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) mark_buffer_dirty(unbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) } else {
/*
 * append the indirect item with holes if needed; when
 * appending the pointer to the 'block'-th block, use the
 * block which is already allocated
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct cpu_key tmp_key;
/*
 * we use this when we need to allocate only one block,
 * which is the fast path
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) unp_t unf_single = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) unp_t *un;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) __u64 max_to_insert =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) UNFM_P_SIZE;
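/*
 * max_to_insert is the largest number of unformatted node
 * pointers that fit into a single item on this block size
 */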
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) __u64 blocks_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) "vs-804: invalid position for append");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * indirect item has to be appended,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * set up key of that position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * (key type is unimportant)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) make_cpu_key(&tmp_key, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) le_key_k_offset(version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) &ih->ih_key) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) op_bytes_number(ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) inode->i_sb->s_blocksize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) TYPE_INDIRECT, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) "green-805: invalid offset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) blocks_needed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ((cpu_key_k_offset(&key) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) s_blocksize_bits);
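/*
 * blocks_needed counts one pointer for the target block plus one
 * for each hole block between the end of the item and the target
 */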
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (blocks_needed == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) un = &unf_single;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) un = kcalloc(min(blocks_needed, max_to_insert),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) UNFM_P_SIZE, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (!un) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) un = &unf_single;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) blocks_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) max_to_insert = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (blocks_needed <= max_to_insert) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * we are going to add target block to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * the file. Use allocated block for that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) un[blocks_needed - 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) cpu_to_le32(allocated_block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) set_block_dev_mapped(bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) allocated_block_nr, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* paste hole to the indirect item */
/*
 * if kcalloc failed, max_to_insert was set to zero
 * above, which means we only have room for one block
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) blocks_needed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) max_to_insert ? max_to_insert : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) reiserfs_paste_into_item(th, &path, &tmp_key, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) (char *)un,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) UNFM_P_SIZE *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) blocks_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (blocks_needed != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) kfree(un);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) reiserfs_free_block(th, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) allocated_block_nr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!done) {
/*
 * we need to record the new file size in case this
 * function is interrupted/aborted later on. We may
 * do this only for holes
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) inode->i_size +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) inode->i_sb->s_blocksize * blocks_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (done == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
/*
 * this loop could log more blocks than we had originally
 * asked for. So, we have to allow the transaction to end
 * if it is too big or too full. Update the inode so things
 * are consistent if we crash before the function returns, and
 * release the path so that anybody waiting on the path before
 * ending their transaction will be able to continue.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) retval = restart_transaction(th, inode, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * inserting indirect pointers for a hole can take a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * long time. reschedule if needed and also release the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * lock for others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) reiserfs_cond_resched(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) retval = search_for_position_by_key(inode->i_sb, &key, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (retval == POSITION_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) reiserfs_warning(inode->i_sb, "vs-825",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) "%K should not be found", &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) retval = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (allocated_block_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) reiserfs_free_block(th, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) allocated_block_nr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) bh = get_last_bh(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ih = tp_item_head(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) item = tp_item_body(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) pos_in_item = path.pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (th && (!dangle || (retval && !th->t_trans_id))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (th->t_trans_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) reiserfs_update_sd(th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) retval = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) reiserfs_check_path(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void reiserfs_readahead(struct readahead_control *rac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) mpage_readahead(rac, reiserfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
/*
 * Compute the real number of bytes used by a file.
 * The following three functions can go away when we have
 * enough space in the stat item
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static int real_space_diff(struct inode *inode, int sd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) loff_t blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return sd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
/*
 * The end of the file is also in a full block with an indirect
 * reference, so round up to the next block.
 *
 * there is just no way to know if the tail is actually packed
 * in the file, so we have to assume it isn't. When we pack the
 * tail, we add 4 bytes to pretend there really is an unformatted
 * node pointer
 */
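/*
 * Illustrative example (assuming a 4 KiB block size): a 5000-byte
 * file spans two blocks, so this counts two unformatted node
 * pointers (2 * UNFM_P_SIZE) on top of the stat data item itself.
 */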
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) bytes =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ((inode->i_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) sd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int sd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
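/* 'blocks' is a count of 512-byte sectors, hence the shift by 9 */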
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return inode->i_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) (loff_t) (real_space_diff(inode, sd_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return ((loff_t) real_space_diff(inode, sd_size)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) (((loff_t) blocks) << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
/* Compute the number of blocks used by a file, as ReiserFS counts them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) loff_t bytes = inode_get_bytes(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) loff_t real_space = real_space_diff(inode, sd_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* keeps fsck and non-quota versions of reiserfs happy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) bytes += (loff_t) 511;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
/*
 * files from before the quota patch might have i_blocks such that
 * bytes < real_space. Deal with that here to prevent it from
 * going negative.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (bytes < real_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return (bytes - real_space) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
/*
 * BAD: new directories have stat data of the new type and all
 * other items of the old type. The version stored in the inode
 * describes the body items, so in update_stat_data we cannot rely
 * on the inode, but have to check the item version directly
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* called by read_locked_inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void init_inode(struct inode *inode, struct treepath *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct item_head *ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) __u32 rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) bh = PATH_PLAST_BUFFER(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ih = tp_item_head(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) copy_key(INODE_PKEY(inode), &ih->ih_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) REISERFS_I(inode)->i_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) REISERFS_I(inode)->i_prealloc_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) REISERFS_I(inode)->i_prealloc_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) REISERFS_I(inode)->i_trans_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) REISERFS_I(inode)->i_jl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) reiserfs_init_xattr_rwsem(inode);
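/*
 * two on-disk stat data formats exist: v1 (3.5 key format) and v2
 * (3.6 key format); the item version tells us which one we found
 */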
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (stat_data_v1(ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct stat_data_v1 *sd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) (struct stat_data_v1 *)ih_item_body(bh, ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unsigned long blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) set_inode_item_key_version(inode, KEY_FORMAT_3_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) set_inode_sd_version(inode, STAT_DATA_V1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) inode->i_mode = sd_v1_mode(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) set_nlink(inode, sd_v1_nlink(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) i_uid_write(inode, sd_v1_uid(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) i_gid_write(inode, sd_v1_gid(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) inode->i_size = sd_v1_size(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) inode->i_atime.tv_sec = sd_v1_atime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) inode->i_mtime.tv_sec = sd_v1_mtime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) inode->i_ctime.tv_sec = sd_v1_ctime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) inode->i_atime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) inode->i_ctime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) inode->i_mtime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) inode->i_blocks = sd_v1_blocks(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) blocks = (inode->i_size + 511) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
/*
 * there was a bug in <=3.5.23 when i_blocks could take negative
 * values. Starting from 3.5.17 this value could even be stored
 * in stat data. For such files we set i_blocks based on file
 * size. Two notes: this can be wrong for sparse files, and the
 * on-disk value will only be updated if the file's inode ever
 * changes
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (inode->i_blocks > blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) inode->i_blocks = blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) rdev = sd_v1_rdev(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) REISERFS_I(inode)->i_first_direct_byte =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) sd_v1_first_direct_byte(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
/*
 * an early bug in the quota code could give us an odd number
 * for the block count. This is incorrect; fix it here.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (inode->i_blocks & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) inode->i_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) inode_set_bytes(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) to_real_used_space(inode, inode->i_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) SD_V1_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * nopack is initially zero for v1 objects. For v2 objects,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * nopack is initialised from sd_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * new stat data found, but object may have old items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * (directories and symlinks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) inode->i_mode = sd_v2_mode(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) set_nlink(inode, sd_v2_nlink(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) i_uid_write(inode, sd_v2_uid(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) inode->i_size = sd_v2_size(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) i_gid_write(inode, sd_v2_gid(sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) inode->i_mtime.tv_sec = sd_v2_mtime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) inode->i_atime.tv_sec = sd_v2_atime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) inode->i_ctime.tv_sec = sd_v2_ctime(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) inode->i_ctime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) inode->i_mtime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) inode->i_atime.tv_nsec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) inode->i_blocks = sd_v2_blocks(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) rdev = sd_v2_rdev(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) inode->i_generation =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) inode->i_generation = sd_v2_generation(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) set_inode_item_key_version(inode, KEY_FORMAT_3_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) set_inode_item_key_version(inode, KEY_FORMAT_3_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) REISERFS_I(inode)->i_first_direct_byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) set_inode_sd_version(inode, STAT_DATA_V2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) inode_set_bytes(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) to_real_used_space(inode, inode->i_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) SD_V2_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * read persistent inode attributes from sd and initialise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * generic inode flags from them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) pathrelse(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (S_ISREG(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) inode->i_op = &reiserfs_file_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) inode->i_fop = &reiserfs_file_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) inode->i_mapping->a_ops = &reiserfs_address_space_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) } else if (S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) inode->i_op = &reiserfs_dir_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) inode->i_fop = &reiserfs_dir_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) } else if (S_ISLNK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) inode->i_op = &reiserfs_symlink_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) inode_nohighmem(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) inode->i_mapping->a_ops = &reiserfs_address_space_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) inode->i_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) inode->i_op = &reiserfs_special_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* update new stat data with inode fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static void inode2sd(void *sd, struct inode *inode, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct stat_data *sd_v2 = (struct stat_data *)sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) set_sd_v2_mode(sd_v2, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) set_sd_v2_nlink(sd_v2, inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) set_sd_v2_uid(sd_v2, i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) set_sd_v2_size(sd_v2, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) set_sd_v2_gid(sd_v2, i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) set_sd_v2_generation(sd_v2, inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) set_sd_v2_attrs(sd_v2, REISERFS_I(inode)->i_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* used to copy inode's fields to old stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) set_sd_v1_mode(sd_v1, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) set_sd_v1_uid(sd_v1, i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) set_sd_v1_gid(sd_v1, i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) set_sd_v1_nlink(sd_v1, inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) set_sd_v1_size(sd_v1, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
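/*
 * rdev and the block count share a union in the v1 stat data, so
 * only one of them can be stored, hence the either/or above
 */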
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* Sigh. i_first_direct_byte is back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) set_sd_v1_first_direct_byte(sd_v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) REISERFS_I(inode)->i_first_direct_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
/*
 * NOTE: you must prepare the buffer head before sending it here,
 * and then log it after the call
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static void update_stat_data(struct treepath *path, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct item_head *ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) bh = PATH_PLAST_BUFFER(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ih = tp_item_head(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (!is_statdata_le_ih(ih))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) INODE_PKEY(inode), ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* path points to old stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (stat_data_v1(ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) inode2sd_v1(ih_item_body(bh, ih), inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) inode2sd(ih_item_body(bh, ih), inode, size);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct inode *inode, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) INITIALIZE_PATH(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int fs_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct item_head *ih, tmp_ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* key type is unimportant */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /* look for the object's stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) retval = search_item(inode->i_sb, &key, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) reiserfs_error(inode->i_sb, "vs-13050",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) "i/o failure occurred trying to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) "update %K stat data", &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (retval == ITEM_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) pos = PATH_LAST_POSITION(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (inode->i_nlink == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) reiserfs_warning(inode->i_sb, "vs-13060",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) "stat data of object %k (nlink == %d) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) "not found (pos %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) INODE_PKEY(inode), inode->i_nlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) reiserfs_check_path(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
/*
 * sigh, prepare_for_journal might schedule. When it
 * schedules, the FS might change. We have to detect that,
 * and loop back to the search if the stat data item has moved
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) bh = get_last_bh(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ih = tp_item_head(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) copy_item_head(&tmp_ih, ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) fs_gen = get_generation(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
/* the stat data item has moved after scheduling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (fs_changed(fs_gen, inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) && item_moved(&tmp_ih, &path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) reiserfs_restore_prepared_buffer(inode->i_sb, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) update_stat_data(&path, inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) journal_mark_dirty(th, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
/*
 * reiserfs_read_locked_inode is called to read the inode off disk, and it
 * does a make_bad_inode when things go wrong. But, we need to make sure
 * to clear the key in the private portion of the inode, otherwise a
 * corresponding iput might try to delete whatever object the inode last
 * represented.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static void reiserfs_make_bad_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) memset(INODE_PKEY(inode), 0, KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) make_bad_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * initially this function was derived from minix or ext2's analog and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * evolved as the prototype did
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
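/*
 * init callback for iget5_locked(): stash the objectid and the
 * parent directory id so reiserfs_find_actor() can match this inode
 */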
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int reiserfs_init_locked_inode(struct inode *inode, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) inode->i_ino = args->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
/*
 * looks for the stat data in the tree, and fills in the stat data
 * fields of the in-core inode
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) void reiserfs_read_locked_inode(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct reiserfs_iget_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) INITIALIZE_PATH(path_to_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) unsigned long dirino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dirino = args->dirid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
/*
 * set version 1; version 2 could be used too, because the stat
 * data key is the same in both versions
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* look for the object's stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) retval = search_item(inode->i_sb, &key, &path_to_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) reiserfs_error(inode->i_sb, "vs-13070",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) "i/o failure occurred trying to find "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) "stat data of %K", &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) reiserfs_make_bad_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* a stale NFS handle can trigger this without it being an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (retval != ITEM_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) pathrelse(&path_to_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) reiserfs_make_bad_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) clear_nlink(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) init_inode(inode, &path_to_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
/*
 * It is possible that knfsd is trying to access an inode of a file
 * that is being removed from the disk by some other thread. As we
 * update sd on unlink all that is required is to check for nlink
 * here. This bug was first found by Sizif when debugging
 * SquidNG/Butterfly, forgotten, and found again after Philippe
 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
 *
 * A more logical fix would require changes in fs/inode.c:iput() to
 * remove the inode from the hash-table _after_ the fs cleaned disk
 * stuff up, and in iget() to return NULL if an I_FREEING inode is
 * found in the hash-table.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * Currently there is one place where it is ok to meet an inode with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * nlink == 0: the processing of open-unlinked and half-truncated files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * during mount (fs/reiserfs/super.c:finish_unfinished()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if ((inode->i_nlink == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) reiserfs_warning(inode->i_sb, "vs-13075",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) "dead inode read from disk %K. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) "This is likely to be race with knfsd. Ignore",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) reiserfs_make_bad_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* init_inode() should have released the path already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) reiserfs_check_path(&path_to_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * Stat data v1 doesn't support ACLs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (get_inode_sd_version(inode) == STAT_DATA_V1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) cache_no_acl(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * @inode: inode from hash table to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * @opaque: "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * This function is called by iget5_locked() to distinguish reiserfs inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * having the same inode numbers. Such inodes can only exist due to some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * error condition. One of them should be bad. Inodes with identical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * inode numbers (objectids) are distinguished by parent directory ids.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) int reiserfs_find_actor(struct inode *inode, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct reiserfs_iget_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) args = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* args is already in CPU order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return (inode->i_ino == args->objectid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
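/*
 * Illustration (not in the original source): two objects can share
 * objectid 100 while living under different parents, e.g. keys
 * [dir_id=5, objectid=100] and [dir_id=7, objectid=100]. Both map to
 * i_ino == 100, so comparing i_ino alone is not enough; the dir_id
 * comparison above is what tells them apart.
 */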
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct reiserfs_iget_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) args.objectid = key->on_disk_key.k_objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) args.dirid = key->on_disk_key.k_dir_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) depth = reiserfs_write_unlock_nested(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) inode = iget5_locked(s, key->on_disk_key.k_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) reiserfs_find_actor, reiserfs_init_locked_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) (void *)(&args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) reiserfs_write_lock_nested(s, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (inode->i_state & I_NEW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) reiserfs_read_locked_inode(inode, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /* either due to i/o error or a stale NFS handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static struct dentry *reiserfs_get_dentry(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) u32 objectid, u32 dir_id, u32 generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) key.on_disk_key.k_objectid = objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) key.on_disk_key.k_dir_id = dir_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) reiserfs_write_lock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) inode = reiserfs_iget(sb, &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (inode && !IS_ERR(inode) && generation != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) generation != inode->i_generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) reiserfs_write_unlock(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return d_obtain_alias(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int fh_len, int fh_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * fhtype happens to reflect the number of u32s encoded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * Due to a bug in earlier code, fhtype might indicate that there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * are more u32s than actually fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * So if fhtype seems to be larger than len, reduce fhtype.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * Valid types are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) * 2 - objectid + dir_id - legacy support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * 3 - objectid + dir_id + generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * 4 - objectid + dir_id + objectid and dirid of parent - legacy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * 5 - objectid + dir_id + generation + objectid and dirid of parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * 6 - as above plus generation of directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * 6 does not fit in NFSv2 handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) */
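/*
 * Illustration (not in the original source): for a type-5 handle the
 * decode below and in reiserfs_fh_to_parent reads
 *
 *   raw[0] = objectid          raw[3] = parent objectid
 *   raw[1] = dir_id            raw[4] = parent dir_id
 *   raw[2] = generation
 *
 * which is why the generation argument is taken from raw[2] only for
 * types 3, 5 and 6.
 */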
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (fh_type > fh_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (fh_type != 6 || fh_len != 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) reiserfs_warning(sb, "reiserfs-13077",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "nfsd/reiserfs, fhtype=%d, len=%d - odd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) fh_type, fh_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) fh_type = fh_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (fh_len < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) int fh_len, int fh_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (fh_type > fh_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) fh_type = fh_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (fh_type < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return reiserfs_get_dentry(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) (fh_type >= 5) ? fid->raw[3] : fid->raw[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) (fh_type >= 5) ? fid->raw[4] : fid->raw[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) (fh_type == 6) ? fid->raw[5] : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
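/*
 * Illustration (not in the original source): for a legacy type-4 handle
 * (no generations) the parent key sits at raw[2]/raw[3]; for types 5 and
 * 6 it is shifted to raw[3]/raw[4] by the generation word, and only a
 * type-6 handle carries the parent's generation in raw[5].
 */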
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct inode *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) int maxlen = *lenp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (parent && (maxlen < 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *lenp = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return FILEID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) } else if (maxlen < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) *lenp = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return FILEID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) data[0] = inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) data[2] = inode->i_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) *lenp = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) data[3] = parent->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) *lenp = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (maxlen >= 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) data[5] = parent->i_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) *lenp = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return *lenp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
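/*
 * Illustration (not in the original source): with parent != NULL and
 * maxlen >= 6 the encoded handle is
 *
 *   data[] = { ino, dir_id, generation,
 *              parent ino, parent dir_id, parent generation }
 *
 * with *lenp == 6, i.e. exactly the type-6 layout that
 * reiserfs_fh_to_dentry and reiserfs_fh_to_parent decode above.
 */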
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * update_sd: looks for the stat data, copies the in-core fields to it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * and marks the buffer containing the stat data as dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * reiserfs inodes are never really dirty, since the dirty inode call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * always logs them. This call allows the VFS inode marking routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * to properly mark inodes for datasync and such, but only actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * does something when called for a synchronous update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int jbegin_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (sb_rdonly(inode->i_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * Memory pressure can sometimes initiate write_inode calls with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * sync == 1. Those cases occur just when the system needs RAM, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * when the inode needs to reach disk for safety, and they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * safely be ignored because the altered inode has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * logged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) reiserfs_update_sd(&th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) journal_end_sync(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * stat data of new object is inserted already, this inserts the item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * containing "." and ".." entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct item_head *ih, struct treepath *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) struct inode *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct super_block *sb = th->t_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) char empty_dir[EMPTY_DIR_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) char *body = empty_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) _make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) TYPE_DIRENTRY, 3 /*key length */ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * compose item head for the new item. Directories consist of items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * of the old type (ITEM_VERSION_1). Do not set the key (second arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * is NULL); that is done by reiserfs_new_inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (old_format_only(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) ih->ih_key.k_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) INODE_PKEY(dir)->k_dir_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) INODE_PKEY(dir)->k_objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) make_empty_dir_item(body, ih->ih_key.k_dir_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) ih->ih_key.k_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) INODE_PKEY(dir)->k_dir_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) INODE_PKEY(dir)->k_objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
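/*
 * body now holds the two initial entries: "." pointing at this
 * directory's own key and ".." pointing at the parent directory's key,
 * as passed to make_empty_dir_item{,_v1} above.
 */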
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) /* look for place in the tree for new item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) retval = search_item(sb, &key, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) reiserfs_error(sb, "vs-13080",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) "i/o failure occurred creating new directory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (retval == ITEM_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) pathrelse(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) reiserfs_warning(sb, "vs-13070",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) "object with this key exists (%k)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) &(ih->ih_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* insert the item, that is, the empty directory item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return reiserfs_insert_item(th, path, &key, ih, inode, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * stat data of object has been inserted, this inserts the item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * containing the body of symlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct item_head *ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct treepath *path, const char *symname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int item_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct super_block *sb = th->t_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) _make_cpu_key(&key, KEY_FORMAT_3_5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) le32_to_cpu(ih->ih_key.k_dir_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) le32_to_cpu(ih->ih_key.k_objectid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 1, TYPE_DIRECT, 3 /*key length */ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 0 /*free_space */ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) /* look for place in the tree for new item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) retval = search_item(sb, &key, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) reiserfs_error(sb, "vs-13080",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) "i/o failure occurred creating new symlink");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (retval == ITEM_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) pathrelse(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) reiserfs_warning(sb, "vs-13080",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) "object with this key exists (%k)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) &(ih->ih_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /* insert the item, that is, the body of the symlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return reiserfs_insert_item(th, path, &key, ih, inode, symname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * inserts the stat data into the tree, and then calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * reiserfs_new_directory (to insert ".", ".." item if new object is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * directory) or reiserfs_new_symlink (to insert symlink body if new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * object is symlink) or nothing (if new object is regular file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * NOTE! uid and gid must already be set in the inode. If we return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) * non-zero due to an error, we have to drop the quota previously allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * for the fresh inode. This can only be done outside a transaction, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * if we return non-zero, we also end the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * @th: active transaction handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * @dir: parent directory for new inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * @mode: mode of new inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * @symname: symlink contents if inode is symlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * @isize: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * symlinks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * @inode: inode to be filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * @security: optional security context to associate with this inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) */
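/*
 * Hypothetical call sketch (not in the original source): a mkdir-style
 * caller would invoke this roughly as
 *
 *   retval = reiserfs_new_inode(&th, dir, mode, NULL,
 *                               EMPTY_DIR_SIZE, dentry, inode, &security);
 *
 * i.e. symname is NULL and i_size is EMPTY_DIR_SIZE for a directory,
 * per the parameter descriptions above.
 */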
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct inode *dir, umode_t mode, const char *symname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /* 0 for regular, EMPTY_DIR_SIZE for dirs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) strlen(symname) for symlinks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) loff_t i_size, struct dentry *dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct reiserfs_security_handle *security)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct super_block *sb = dir->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct reiserfs_iget_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) INITIALIZE_PATH(path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct item_head ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct stat_data sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) depth = reiserfs_write_unlock_nested(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) err = dquot_alloc_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) reiserfs_write_lock_nested(sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) goto out_end_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (!dir->i_nlink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /* item head of new item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (!ih.ih_key.k_objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (old_format_only(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) depth = reiserfs_write_unlock_nested(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) err = insert_inode_locked4(inode, args.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) reiserfs_find_actor, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) reiserfs_write_lock_nested(inode->i_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (old_format_only(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * not a perfect generation count, as object ids can be reused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * but this is as good as reiserfs can do right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * note that the private part of inode isn't filled in yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * we have to use the directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) #if defined( USE_INODE_GENERATION_COUNTER )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) inode->i_generation =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) inode->i_generation = ++event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* fill stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /* uid and gid must already be set by the caller for quota init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) inode->i_size = i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) inode->i_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) inode->i_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) U32_MAX /*NO_BYTES_IN_DIRECT_ITEM */ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) REISERFS_I(inode)->i_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) REISERFS_I(inode)->i_prealloc_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) REISERFS_I(inode)->i_prealloc_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) REISERFS_I(inode)->i_trans_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) REISERFS_I(inode)->i_jl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) REISERFS_I(inode)->i_attrs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) reiserfs_init_xattr_rwsem(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) /* key to search for correct place for new stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) _make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) TYPE_STAT_DATA, 3 /*key length */ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* find proper place for inserting of stat data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) retval = search_item(sb, &key, &path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (retval == IO_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (retval == ITEM_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) pathrelse(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (old_format_only(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* i_uid or i_gid is too big to be stored in stat data v3.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) pathrelse(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) inode2sd_v1(&sd, inode, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) inode2sd(&sd, inode, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * store in the in-core inode the key of the stat data and the version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * that all items of this object will have (directory items will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * the old offset format; other new objects will consist of new items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) set_inode_item_key_version(inode, KEY_FORMAT_3_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) set_inode_item_key_version(inode, KEY_FORMAT_3_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (old_format_only(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) set_inode_sd_version(inode, STAT_DATA_V1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) set_inode_sd_version(inode, STAT_DATA_V2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /* insert the stat data into the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #ifdef DISPLACE_NEW_PACKING_LOCALITIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (REISERFS_I(dir)->new_packing_locality)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) th->displace_new_blocks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) (char *)(&sd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) err = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) reiserfs_check_path(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) goto out_bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) #ifdef DISPLACE_NEW_PACKING_LOCALITIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (!th->displace_new_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) REISERFS_I(dir)->new_packing_locality = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (S_ISDIR(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /* insert item with "." and ".." */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (S_ISLNK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* insert body of symlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (!old_format_only(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) i_size = ROUND_UP(i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) retval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) err = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) reiserfs_check_path(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) journal_end(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) goto out_inserted_sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * Mark it private if we're creating the privroot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * or something under it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) inode->i_flags |= S_PRIVATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) inode->i_opflags &= ~IOP_XATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (reiserfs_posixacl(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) err = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) reiserfs_check_path(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) journal_end(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) goto out_inserted_sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) } else if (inode->i_sb->s_flags & SB_POSIXACL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) reiserfs_warning(inode->i_sb, "jdm-13090",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) "ACLs aren't enabled in the fs, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) "but vfs thinks they are!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (security->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) retval = reiserfs_security_write(th, inode, security);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) err = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) reiserfs_check_path(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) retval = journal_end(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) err = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) goto out_inserted_sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) reiserfs_update_sd(th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) reiserfs_check_path(&path_to_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) out_bad_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /* Invalidate the object, nothing was inserted yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) INODE_PKEY(inode)->k_objectid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) /* Quota change must be inside a transaction for journaling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) depth = reiserfs_write_unlock_nested(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) dquot_free_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) reiserfs_write_lock_nested(inode->i_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) out_end_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) journal_end(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) * dquot_drop() can be called outside a transaction and needs more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) * credits, so it is better done outside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) depth = reiserfs_write_unlock_nested(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) dquot_drop(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) reiserfs_write_lock_nested(inode->i_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) inode->i_flags |= S_NOQUOTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) make_bad_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) out_inserted_sd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) clear_nlink(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) th->t_trans_id = 0; /* so the caller can't use this handle later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (inode->i_state & I_NEW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * finds the tail page in the page cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * reads the last block in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * On success, page_result is set to a locked, pinned page, and bh_result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * is set to an up-to-date buffer for the last block in the file. Returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * Tail conversion is not done, so bh_result might not be valid for writing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * trying to write the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) * On failure, nonzero is returned and page_result and bh_result are untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static int grab_tail_page(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct page **page_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct buffer_head **bh_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * we want the page with the last byte in the file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * not the page that will hold the next byte for appending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) unsigned long pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) unsigned long start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) unsigned long blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) struct buffer_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) * We know that we are only called with inode->i_size > 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * We also know that a file tail can never be as big as a block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * If i_size % blocksize == 0, our file is currently block aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * and it won't need converting or zeroing after a truncate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
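/*
 * Worked example (not in the original source): with PAGE_SIZE == 4096,
 * blocksize == 4096 and i_size == 10000: index = 9999 >> 12 = 2,
 * offset = 10000 & 4095 = 1808, so the block-alignment check below
 * does not fire and start = (1808 / 4096) * 4096 = 0.
 */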
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if ((offset & (blocksize - 1)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) page = grab_cache_page(inode->i_mapping, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /* start within the page of the last block in the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) start = (offset / blocksize) * blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) error = __block_write_begin(page, start, offset - start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) reiserfs_get_block_create_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) bh = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (pos >= start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) pos += blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * Note: this should never happen; prepare_write should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * taking care of it for us. If the buffer isn't up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * date, I've screwed up either the code that finds the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * buffer or the code that calls prepare_write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) reiserfs_error(inode->i_sb, "clm-6000",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) "error reading block %lu", bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) *bh_result = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) *page_result = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * vfs version of truncate file. Must NOT be called with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * a transaction already started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * some code taken from block_truncate_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) /* we want the offset for the first byte after the end of the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) unsigned blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) unsigned length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (inode->i_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) error = grab_tail_page(inode, &page, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * -ENOENT means we truncated past the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * file, and get_block_create_0 could not find a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * block to read in, which is ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (error != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) reiserfs_error(inode->i_sb, "clm-6001",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) "grab_tail_page failed %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * So, if page != NULL, we have a buffer head for the offset at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * the end of the file. If the bh is mapped, and bh->b_blocknr != 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * then we have an unformatted node. Otherwise, we have a direct item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * and no zeroing is required on disk. We zero after the truncate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * because the truncate might pack the item anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * (it will unmap bh if it packs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * It is enough to reserve space in the transaction for 2 balancings:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * one for adding the "save" link and another for the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * cut_from_item. 1 more is for update_sd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) error = journal_begin(&th, inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) JOURNAL_PER_BALANCE_CNT * 2 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) reiserfs_update_inode_transaction(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (update_timestamps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * we are doing a real truncate: if the system crashes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * before the last transaction of the truncate gets committed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * then on reboot the file appears either properly truncated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * or not truncated at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) add_save_link(&th, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) error = journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /* check reiserfs_do_truncate after ending the transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (err2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) error = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (update_timestamps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) error = remove_save_link(inode, 1 /* truncate */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
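/*
 * Worked example (not in the original source): continuing the numbers
 * from the grab_tail_page example, offset == 1808 and blocksize == 4096
 * give length = 1808, then length = 4096 - 1808 = 2288, so bytes
 * 1808..4095 of the tail page are zeroed below.
 */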
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) length = offset & (blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) /* if we are not on a block boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) length = blocksize - length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) zero_user(page, offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (buffer_mapped(bh) && bh->b_blocknr != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static int map_block_for_writepage(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct buffer_head *bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) unsigned long block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) int fs_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct item_head tmp_ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) struct item_head *ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) __le32 *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct cpu_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) INITIALIZE_PATH(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) int pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) int jbegin_count = JOURNAL_PER_BALANCE_CNT;
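	/*
	 * reiserfs item keys count file bytes from 1, not 0, so the key
	 * offset of the first byte of file block N is N * blocksize + 1.
	 */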
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) int use_get_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) int bytes_copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) int copy_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) int trans_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * catch places below that try to log something without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * starting a trans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) th.t_trans_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (!buffer_uptodate(bh_result)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) kmap(bh_result->b_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) start_over:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) reiserfs_write_lock(inode->i_sb);
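	/*
	 * build a key for the first byte this block covers; TYPE_ANY is
	 * used because the byte may live in either an indirect item (a
	 * block pointer) or a direct item (an inline tail).
	 */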
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) research:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) retval = search_for_position_by_key(inode->i_sb, &key, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (retval != POSITION_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) use_get_block = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) bh = get_last_bh(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) ih = tp_item_head(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) item = tp_item_body(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) pos_in_item = path.pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /* we've found an unformatted node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (indirect_item_found(retval, ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (bytes_copied > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) reiserfs_warning(inode->i_sb, "clm-6002",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) "bytes_copied %d", bytes_copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (!get_block_num(item, pos_in_item)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* crap, we are writing to a hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) use_get_block = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) set_block_dev_mapped(bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) get_block_num(item, pos_in_item), inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) } else if (is_direct_le_ih(ih)) {
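		/*
		 * the data lives in a direct item (a tail packed into the
		 * tree), so there is no disk block to map. Copy the page
		 * contents into the item body instead and log the tree
		 * buffer that holds it.
		 */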
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) p = page_address(bh_result->b_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) p += (byte_offset - 1) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) copy_size = ih_item_len(ih) - pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
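		/*
		 * remember the tree generation and the item head:
		 * journal_begin and reiserfs_prepare_for_journal may sleep,
		 * and if the tree is rebalanced meanwhile the item can
		 * move, forcing us to redo the search.
		 */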
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) fs_gen = get_generation(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) copy_item_head(&tmp_ih, ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (!trans_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* vs-3050 is gone, no need to drop the path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) retval = journal_begin(&th, inode->i_sb, jbegin_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) reiserfs_update_inode_transaction(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) trans_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (fs_changed(fs_gen, inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) && item_moved(&tmp_ih, &path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) reiserfs_restore_prepared_buffer(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) goto research;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) if (fs_changed(fs_gen, inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) && item_moved(&tmp_ih, &path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) reiserfs_restore_prepared_buffer(inode->i_sb, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) goto research;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) copy_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) journal_mark_dirty(&th, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) bytes_copied += copy_size;
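		/*
		 * mapping the buffer to block 0 marks it as backed by a
		 * direct item rather than a real disk block; callers test
		 * b_blocknr == 0 to skip block I/O on it.
		 */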
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) set_block_dev_mapped(bh_result, 0, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) /* are there still bytes left? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (bytes_copied < bh_result->b_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) (byte_offset + bytes_copied) < inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) set_cpu_key_k_offset(&key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) cpu_key_k_offset(&key) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) copy_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) goto research;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) reiserfs_warning(inode->i_sb, "clm-6003",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) "bad item inode %lu", inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) pathrelse(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (trans_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) int err = journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) retval = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) trans_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) /* this is where we fill in holes in the file. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (use_get_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) retval = reiserfs_get_block(inode, block, bh_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) | GET_BLOCK_NO_DANGLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (!retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!buffer_mapped(bh_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) || bh_result->b_blocknr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /* get_block failed to find a mapped unformatted node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) use_get_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) goto start_over;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) kunmap(bh_result->b_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) * we've copied data from the page into the direct item, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * buffer in the page is now clean, mark it to reflect that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) lock_buffer(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) clear_buffer_dirty(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) unlock_buffer(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * mason@suse.com: updated in 2.5.54 to follow the same general io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * start/recovery path as __block_write_full_page, along with special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * code to handle reiserfs tails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static int reiserfs_write_full_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) unsigned long end_index = inode->i_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) unsigned long block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) sector_t last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct buffer_head *head, *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) int partial = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) int nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) int checked = PageChecked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) struct super_block *s = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) int bh_per_page = PAGE_SIZE / s->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) th.t_trans_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* no logging allowed when nonblocking or from PF_MEMALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (checked && (current->flags & PF_MEMALLOC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * The page dirty bit is cleared before writepage is called, which
	 * means we have to tell create_empty_buffers to make dirty buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * The page really should be up to date at this point, so tossing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * in the BH_Uptodate is just a sanity check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (!page_has_buffers(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) create_empty_buffers(page, s->s_blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) (1 << BH_Dirty) | (1 << BH_Uptodate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) * last page in the file, zero out any contents past the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * last byte in the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (page->index >= end_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) unsigned last_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) last_offset = inode->i_size & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) /* no file contents in this page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (page->index >= end_index + 1 || !last_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) zero_user_segment(page, last_offset, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) bh = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
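	/*
	 * e.g. with 4K pages and a 1K block size, page index N covers file
	 * blocks 4N..4N+3, and buffers past last_block lie entirely beyond
	 * the end of the file.
	 */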
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /* first map all the buffers, logging any direct items we find */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (block > last_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) * This can happen when the block size is less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * the page size. The corresponding bytes in the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) * were zero filled above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) set_buffer_uptodate(bh);
		} else if ((checked || buffer_dirty(bh)) &&
			   (!buffer_mapped(bh) || bh->b_blocknr == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * not mapped yet, or it points to a direct item, search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * the btree for the mapping info, and log any direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * items found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if ((error = map_block_for_writepage(inode, bh, block))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) block++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) /*
	 * we start the transaction after map_block_for_writepage,
	 * because it can create holes in the file (an unbounded operation).
	 * By starting it here, we can make a reliable estimate of how many
	 * blocks we're going to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (checked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) ClearPageChecked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) reiserfs_write_lock(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) error = journal_begin(&th, s, bh_per_page + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) reiserfs_write_unlock(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) reiserfs_update_inode_transaction(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /* now go through and lock any dirty buffers on the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) get_bh(bh);
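		/*
		 * skip buffers with no disk mapping, and tail buffers
		 * (b_blocknr == 0) whose data was already copied into a
		 * direct item and logged by map_block_for_writepage.
		 */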
		if (!buffer_mapped(bh))
			continue;
		if (bh->b_blocknr == 0)
			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (checked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) reiserfs_prepare_for_journal(s, bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) journal_mark_dirty(&th, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) * from this point on, we know the buffer is mapped to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) * real block and not a direct item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (wbc->sync_mode != WB_SYNC_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (!trylock_buffer(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (test_clear_buffer_dirty(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) mark_buffer_async_write(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) } while ((bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (checked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) error = journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) reiserfs_write_unlock(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * since any buffer might be the only dirty buffer on the page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * the first submit_bh can bring the page out of writeback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) * be careful with the buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) struct buffer_head *next = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (buffer_async_write(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) submit_bh(REQ_OP_WRITE, 0, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) put_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) bh = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) if (nr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /*
		 * if this page only had a direct item, it is very possible
		 * for no io to be required without there being an error.
		 * Or, someone else could have locked the buffers and sent
		 * them down the pipe without locking the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) bh = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) partial = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (!partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) /*
	 * catches various errors; we need to make sure any valid dirty blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * get to the media. The page is currently locked and not marked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) bh = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) mark_buffer_async_write(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * clear any dirty bits that might have come from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) * getting attached to a dirty page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct buffer_head *next = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (buffer_async_write(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) submit_bh(REQ_OP_WRITE, 0, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) put_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) bh = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static int reiserfs_readpage(struct file *f, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) return block_read_full_page(page, reiserfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) reiserfs_wait_on_write_block(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) return reiserfs_write_full_page(page, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static void reiserfs_truncate_failed_write(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) truncate_inode_pages(inode->i_mapping, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) reiserfs_truncate_file(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) static int reiserfs_write_begin(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) int old_ref = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) *fsdata = NULL;
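	/*
	 * the cont_expand path (an expanding truncate) issues a zero-length
	 * write at the new file size. If that position is block aligned,
	 * bump pos by one byte so __block_write_begin actually maps (and
	 * allocates) the block at the new EOF; a zero-length write exactly
	 * on a block boundary would touch no buffers at all. The flag is
	 * stashed in *fsdata so reiserfs_write_end can apply the same bump.
	 */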
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (flags & AOP_FLAG_CONT_EXPAND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
		pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) *fsdata = (void *)(unsigned long)flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) index = pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) page = grab_cache_page_write_begin(mapping, index, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) *pagep = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) reiserfs_wait_on_write_block(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) fix_tail_page_for_writing(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (reiserfs_transaction_running(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) struct reiserfs_transaction_handle *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) th = (struct reiserfs_transaction_handle *)current->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) BUG_ON(!th->t_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) old_ref = th->t_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) th->t_refcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) ret = __block_write_begin(page, pos, len, reiserfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (ret && reiserfs_transaction_running(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) struct reiserfs_transaction_handle *th = current->journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) /*
		/*
		 * this gets a little ugly. If reiserfs_get_block returned an
		 * error and left a transaction running, we've got to close
		 * it, and we've got to free the handle if it was a
		 * persistent transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * But, if we had nested into an existing transaction, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * to just drop the ref count on the handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) * If old_ref == 0, the transaction is from reiserfs_get_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) * and it was a persistent trans. Otherwise, it was nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) * above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) if (th->t_refcount > old_ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (old_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) th->t_refcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) err = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) /* Truncate allocated blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) reiserfs_truncate_failed_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) int old_ref = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
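	/*
	 * reiserfs_wait_on_write_block can sleep until the journal permits
	 * new writers; drop the write lock across the wait (remembering the
	 * nesting depth) so the journal can make progress, then take the
	 * lock back at the same depth.
	 */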
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) depth = reiserfs_write_unlock_nested(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) reiserfs_wait_on_write_block(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) reiserfs_write_lock_nested(inode->i_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) fix_tail_page_for_writing(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (reiserfs_transaction_running(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) struct reiserfs_transaction_handle *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) th = (struct reiserfs_transaction_handle *)current->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) BUG_ON(!th->t_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) BUG_ON(!th->t_trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) old_ref = th->t_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) th->t_refcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) ret = __block_write_begin(page, from, len, reiserfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (ret && reiserfs_transaction_running(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) struct reiserfs_transaction_handle *th = current->journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /*
		/*
		 * this gets a little ugly. If reiserfs_get_block returned an
		 * error and left a transaction running, we've got to close
		 * it, and we've got to free the handle if it was a
		 * persistent transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * But, if we had nested into an existing transaction, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * to just drop the ref count on the handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) * If old_ref == 0, the transaction is from reiserfs_get_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * and it was a persistent trans. Otherwise, it was nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) * above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (th->t_refcount > old_ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (old_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) th->t_refcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) err = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return generic_block_bmap(as, block, reiserfs_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) static int reiserfs_write_end(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) int update_sd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) struct reiserfs_transaction_handle *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) unsigned start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) bool locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
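	/*
	 * re-apply the one-byte pos bump that reiserfs_write_begin used for
	 * the cont_expand case, so both sides see the same position
	 */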
	if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
		pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) reiserfs_wait_on_write_block(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) if (reiserfs_transaction_running(inode->i_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) th = current->journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) th = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) start = pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (unlikely(copied < len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) page_zero_new_buffers(page, start + copied, start + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) reiserfs_commit_page(inode, page, start, start + copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) * generic_commit_write does this for us, but does not update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * transaction tracking stuff when the size changes. So, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) * to do the i_size updates here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (pos + copied > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) struct reiserfs_transaction_handle myth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) /*
		 * If the file has grown beyond the boundary where it
		 * can have a tail, unmark it as no longer needing
		 * tail packing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if ((have_large_tails(inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) && inode->i_size > i_block_size(inode) * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) || (have_small_tails(inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) && inode->i_size > i_block_size(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) ret = journal_begin(&myth, inode->i_sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) goto journal_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) reiserfs_update_inode_transaction(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) inode->i_size = pos + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) * this will just nest into our transaction. It's important
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * to use mark_inode_dirty so the inode gets pushed around on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * the dirty lists, and so that O_SYNC works as expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) reiserfs_update_sd(&myth, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) update_sd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) ret = journal_end(&myth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) goto journal_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (th) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (!locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (!update_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) ret = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
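	/*
	 * if the copy came up short on an extending write, write_begin may
	 * have allocated blocks beyond the on-disk i_size; trim them off so
	 * they aren't leaked.
	 */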
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (pos + len > inode->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) reiserfs_truncate_failed_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) return ret == 0 ? copied : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) journal_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) if (th) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (!update_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) reiserfs_update_sd(th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) ret = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) int reiserfs_commit_write(struct file *f, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) unsigned from, unsigned to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) int update_sd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) struct reiserfs_transaction_handle *th = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) depth = reiserfs_write_unlock_nested(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) reiserfs_wait_on_write_block(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) reiserfs_write_lock_nested(inode->i_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (reiserfs_transaction_running(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) th = current->journal_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) reiserfs_commit_page(inode, page, from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * generic_commit_write does this for us, but does not update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) * transaction tracking stuff when the size changes. So, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) * to do the i_size updates here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (pos > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) struct reiserfs_transaction_handle myth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) /*
		 * If the file has grown beyond the boundary where it
		 * can have a tail, unmark it as no longer needing
		 * tail packing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) if ((have_large_tails(inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) && inode->i_size > i_block_size(inode) * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) || (have_small_tails(inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) && inode->i_size > i_block_size(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ret = journal_begin(&myth, inode->i_sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) goto journal_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) reiserfs_update_inode_transaction(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) inode->i_size = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * this will just nest into our transaction. It's important
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * to use mark_inode_dirty so the inode gets pushed around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) * on the dirty lists, and so that O_SYNC works as expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) reiserfs_update_sd(&myth, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) update_sd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) ret = journal_end(&myth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) goto journal_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (th) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (!update_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) ret = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) journal_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (th) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (!update_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) reiserfs_update_sd(th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) ret = reiserfs_end_persistent_transaction(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
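/*
 * propagate the on-disk stat data attribute bits into the generic in-core
 * inode flags and the reiserfs-private "nopack" flag
 */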
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (reiserfs_attrs(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (sd_attrs & REISERFS_SYNC_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) inode->i_flags |= S_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) inode->i_flags &= ~S_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (sd_attrs & REISERFS_IMMUTABLE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) inode->i_flags |= S_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) inode->i_flags &= ~S_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (sd_attrs & REISERFS_APPEND_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) inode->i_flags |= S_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) inode->i_flags &= ~S_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (sd_attrs & REISERFS_NOATIME_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) inode->i_flags |= S_NOATIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) inode->i_flags &= ~S_NOATIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (sd_attrs & REISERFS_NOTAIL_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) REISERFS_I(inode)->i_flags |= i_nopack_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) * decide if this buffer needs to stay around for data logging or ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) * write purposes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) spin_lock(&j->j_dirty_buffers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (!buffer_mapped(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) goto free_jh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) * the page is locked, and the only places that log a data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) * also lock the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (reiserfs_file_data_log(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * very conservative, leave the buffer pinned if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) * anyone might need it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) } else if (buffer_dirty(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) struct reiserfs_journal_list *jl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) struct reiserfs_jh *jh = bh->b_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) * why is this safe?
		 * reiserfs_setattr updates i_size in the on-disk
		 * stat data before allowing vmtruncate to be called.
		 *
		 * If the buffer was put onto the ordered list for this
		 * transaction, we know for sure that either this
		 * transaction or an older one has already updated i_size
		 * on disk, and this ordered data won't be referenced in
		 * the file if we crash.
		 *
		 * If the buffer was put onto the ordered list for an older
		 * transaction, we need to leave it around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) */
		if (jh && (jl = jh->jl)
		    && jl != j->j_current_jl)
			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) free_jh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (ret && bh->b_private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) reiserfs_free_jh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) spin_unlock(&j->j_dirty_buffers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) /* clm -- taken from fs/buffer.c:block_invalidate_page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) unsigned int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) struct buffer_head *head, *bh, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) unsigned int curr_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) unsigned int stop = offset + length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) int partial_page = (offset || length < PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) BUG_ON(!PageLocked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
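^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	 * The Checked tag (set by reiserfs_set_page_dirty() on data=journal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	 * pages) is only cleared when the entire page is being invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	 */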
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (!partial_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) ClearPageChecked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) if (!page_has_buffers(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) bh = head;
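^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 * Worked example (assuming a 4k page with 1k buffers): offset=1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 * and length=2048 give stop=3072, so only the buffers at 1024 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 * 2048 lie fully inside the range and may be dropped; the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 * at 0 and 3072 are left alone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	 */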
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) unsigned int next_off = curr_off + bh->b_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) next = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (next_off > stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) * is this block fully invalidated?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) if (offset <= curr_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (invalidatepage_can_drop(inode, bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) reiserfs_unmap_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) curr_off = next_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) bh = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) * We release buffers only if the entire page is being invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) * The get_block cached value has been unconditionally invalidated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) * so real IO is not possible anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) if (!partial_page && ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) ret = try_to_release_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) /* maybe should BUG_ON(!ret); - neilb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
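^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  * For data=journal files, tag the page Checked and dirty the page as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  * whole rather than its buffers; writepage later uses the Checked tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  * to recognize data that must go through the journal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  */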
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) static int reiserfs_set_page_dirty(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (reiserfs_file_data_log(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) SetPageChecked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) return __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) return __set_page_dirty_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * Returns 1 if the page's buffers were dropped. The page is locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * in the buffers at page_buffers(page).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)  * Even in -o notail mode, we can't be sure that an earlier mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)  * without -o notail didn't create files with tails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) struct buffer_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) WARN_ON(PageChecked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) spin_lock(&j->j_dirty_buffers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) bh = head;
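^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 * A buffer that still carries a journal head (bh->b_private) and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 * dirty or locked belongs to the journal, so the page cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 * released; clean, unlocked journal heads are freed right here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	 */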
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) if (bh->b_private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (!buffer_dirty(bh) && !buffer_locked(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) reiserfs_free_jh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) ret = try_to_free_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) spin_unlock(&j->j_dirty_buffers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) * We thank Mingming Cao for helping us understand in great detail what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) * to do in this section of the code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
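^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	 * There is no reiserfs-specific direct I/O engine: the iterator is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	 * handed to the generic blockdev path, with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	 * reiserfs_get_blocks_direct_io as the block-mapping callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	 */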
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) ret = blockdev_direct_IO(iocb, inode, iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) reiserfs_get_blocks_direct_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	 * In case of error, an extending write may have instantiated a few
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	 * blocks outside i_size. Trim them off again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) loff_t isize = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) loff_t end = iocb->ki_pos + count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) truncate_setsize(inode, isize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) reiserfs_vfs_truncate_file(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
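^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  * Apply attribute changes to a reiserfs inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  * Size changes go through reiserfs_truncate_file() under the tailpack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  * mutex; uid/gid changes run dquot_transfer() and the inode update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  * inside one transaction so that quota and ownership commit together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  */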
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) unsigned int ia_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) error = setattr_prepare(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) /* must be turned off for recursive notify_change calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (is_quota_modification(inode, attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) error = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (attr->ia_valid & ATTR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		 * Version 2 items will be caught by the s_maxbytes check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		 * done for us in inode_newsize_ok() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) attr->ia_size > MAX_NON_LFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) error = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
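^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		/* Wait for any in-flight direct I/O before changing the size. */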
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) /* fill in hole pointers in the expanding truncate case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) if (attr->ia_size > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) error = generic_cont_expand_simple(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (REISERFS_I(inode)->i_prealloc_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) /* we're changing at most 2 bitmaps, inode + super */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) err = journal_begin(&th, inode->i_sb, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) reiserfs_discard_prealloc(&th, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) err = journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) error = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 			 * The file size has changed, so ctime and mtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 			 * need to be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) (get_inode_sd_version(inode) == STAT_DATA_V1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		/* stat data of format v3.5 has 16-bit uid and gid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) struct reiserfs_transaction_handle th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) int jbegin_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 2 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		error = reiserfs_chown_xattrs(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		 * Blocks needed: (user+group)*(old+new) quota structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		 * updates, plus the inode write itself (sb + inode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) error = journal_begin(&th, inode->i_sb, jbegin_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) goto out;
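^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		/* Transfer the inode's quota usage to the new uid/gid. */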
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) error = dquot_transfer(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) reiserfs_write_lock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) * Update corresponding info in inode so that everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) * is in one transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (attr->ia_valid & ATTR_UID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) inode->i_uid = attr->ia_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if (attr->ia_valid & ATTR_GID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) inode->i_gid = attr->ia_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) error = journal_end(&th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) reiserfs_write_unlock(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) if ((attr->ia_valid & ATTR_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) attr->ia_size != i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) error = inode_newsize_ok(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) * Could race against reiserfs_file_release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * if called from NFS, so take tailpack mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) mutex_lock(&REISERFS_I(inode)->tailpack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) truncate_setsize(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) reiserfs_truncate_file(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) mutex_unlock(&REISERFS_I(inode)->tailpack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) setattr_copy(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (!error && reiserfs_posixacl(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) if (attr->ia_valid & ATTR_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) error = reiserfs_acl_chmod(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
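^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  * reiserfs sticks to the classic buffer_head-based page cache paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  * adding journal-aware hooks for dirtying, releasing, and invalidating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  * pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  */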
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) const struct address_space_operations reiserfs_address_space_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) .writepage = reiserfs_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) .readpage = reiserfs_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) .readahead = reiserfs_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) .releasepage = reiserfs_releasepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) .invalidatepage = reiserfs_invalidatepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) .write_begin = reiserfs_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) .write_end = reiserfs_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) .bmap = reiserfs_aop_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) .direct_IO = reiserfs_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) .set_page_dirty = reiserfs_set_page_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) };