// SPDX-License-Identifier: GPL-2.0-or-later
/**
 * aops.c - NTFS kernel address space operations and page cache handling.
 *
 * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
 * Copyright (c) 2002 Richard Russon
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/bio.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"
/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:		buffer head on which io is completed
 * @uptodate:	whether @bh is now uptodate or not
 *
 * Asynchronous I/O completion handler for reading pages belonging to the
 * attribute address space of an inode.  The inodes can either be files or
 * directories or they can be fake inodes describing some attribute.
 *
 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
 * page has been completed and mark the page uptodate or set the error bit on
 * the page.  To determine the size of the records that need fixing up, we
 * cheat a little bit by setting the index_block_size in ntfs_inode to the
 * ntfs record size, and index_block_size_bits to the log(base 2) of the ntfs
 * record size.
 */
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first, *tmp;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	vi = page->mapping->host;
	ni = NTFS_I(vi);

	if (likely(uptodate)) {
		loff_t i_size;
		s64 file_ofs, init_size;

		set_buffer_uptodate(bh);

		file_ofs = ((s64)page->index << PAGE_SHIFT) +
				bh_offset(bh);
		read_lock_irqsave(&ni->size_lock, flags);
		init_size = ni->initialized_size;
		i_size = i_size_read(vi);
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (unlikely(init_size > i_size)) {
			/* Race with shrinking truncate. */
			init_size = i_size;
		}
		/* Check for the current buffer head overflowing. */
		if (unlikely(file_ofs + bh->b_size > init_size)) {
			int ofs;
			void *kaddr;

			ofs = 0;
			if (file_ofs < init_size)
				ofs = init_size - file_ofs;
			kaddr = kmap_atomic(page);
			memset(kaddr + bh_offset(bh) + ofs, 0,
					bh->b_size - ofs);
			flush_dcache_page(page);
			kunmap_atomic(kaddr);
		}
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
				"0x%llx.", (unsigned long long)bh->b_blocknr);
	}
	first = page_buffers(page);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
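	/*
	 * Note: all completions for this page serialize on the first
	 * buffer's b_uptodate_lock, so exactly one completion, the one that
	 * clears the last async_read bit, falls through to the code below;
	 * all others bail out via still_busy above.
	 */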
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
	 * Note we ignore fixup errors as those are detected when
	 * map_mft_record() is called which gives us per record granularity
	 * rather than per page granularity.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		u8 *kaddr;
		unsigned int i, recs;
		u32 rec_size;

		rec_size = ni->itype.index.block_size;
		recs = PAGE_SIZE / rec_size;
		/* Should have been verified before we got here... */
		BUG_ON(!recs);
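		/*
		 * Illustrative numbers: with 4 KiB pages and 1 KiB index
		 * records, recs = 4 and the fixups below run at page offsets
		 * 0x000, 0x400, 0x800 and 0xc00.
		 */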
		kaddr = kmap_atomic(page);
		for (i = 0; i < recs; i++)
			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
					i * rec_size), rec_size);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	}
	unlock_page(page);
	return;
still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the
 * @page->mapping->host inode.  We read each buffer asynchronously and when
 * all buffers are read in, our io completion handler
 * ntfs_end_buffer_async_read(), if required, automatically applies the mst
 * fixups to the page before finally marking it uptodate and unlocking it.
 *
 * We only enforce allocated_size limit because i_size is checked for in
 * generic_file_read().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
 */
static int ntfs_read_block(struct page *page)
{
	loff_t i_size;
	VCN vcn;
	LCN lcn;
	s64 init_size;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	sector_t iblock, lblock, zblock;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int i, nr;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	/* $MFT/$DATA must have its complete runlist in memory at all times. */
	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize, 0);
		if (unlikely(!page_has_buffers(page))) {
			unlock_page(page);
			return -ENOMEM;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/*
	 * We may be racing with truncate.  To avoid some of the problems we
	 * now take a snapshot of the various sizes and use those for the whole
	 * of the function.  In case of an extending truncate it just means we
	 * may leave some buffers unmapped which are now allocated.  This is
	 * not a problem since these buffers will just get mapped when a write
	 * occurs.  In case of a shrinking truncate, we will detect this later
	 * on due to the runlist being incomplete and if the page is being
	 * fully truncated, truncate will throw it away as soon as we unlock
	 * it so no need to worry what we do with it.
	 */
	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
	read_lock_irqsave(&ni->size_lock, flags);
	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
	init_size = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size)) {
		/* Race with shrinking truncate. */
		init_size = i_size;
	}
	zblock = (init_size + blocksize - 1) >> blocksize_bits;
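	/*
	 * Worked example (illustrative values): with 4 KiB pages and
	 * 512-byte blocks (blocksize_bits = 9), page->index 2 gives
	 * iblock = 2 << 3 = 16.  An allocated_size of 10000 bytes gives
	 * lblock = (10000 + 511) >> 9 = 20, so only blocks 16-19 of this
	 * page lie within the allocation.
	 */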

	/* Loop through all the buffers in the page. */
	rl = NULL;
	nr = i = 0;
	do {
		int err = 0;

		if (unlikely(buffer_uptodate(bh)))
			continue;
		if (unlikely(buffer_mapped(bh))) {
			arr[nr++] = bh;
			continue;
		}
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
			bool is_retry = false;

			/* Convert iblock into corresponding vcn and offset. */
			vcn = (VCN)iblock << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)iblock << blocksize_bits) &
					vol->cluster_size_mask;
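			/*
			 * E.g. (illustrative): 512-byte blocks and 4 KiB
			 * clusters give, for iblock 9, a byte offset of
			 * 9 << 9 = 4608, hence vcn = 4608 >> 12 = 1 and
			 * vcn_ofs = 4608 & 0xfff = 0x200.  If vcn 1 maps to
			 * lcn 100, the block computed below is
			 * ((100 << 12) + 0x200) >> 9 = 801.
			 */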
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (lcn >= 0) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
						+ vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
				/* Only read initialized data blocks. */
				if (iblock < zblock) {
					arr[nr++] = bh;
					continue;
				}
				/* Fully non-initialized data block, zero it. */
				goto handle_zblock;
			}
			/* It is a hole, need to zero it. */
			if (lcn == LCN_HOLE)
				goto handle_hole;
			/* If first try and runlist unmapped, map and retry. */
			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
				is_retry = true;
				/*
				 * Attempt to map runlist, dropping lock for
				 * the duration.
				 */
				up_read(&ni->runlist.lock);
				err = ntfs_map_runlist(ni, vcn);
				if (likely(!err))
					goto lock_retry_remap;
				rl = NULL;
			} else if (!rl)
				up_read(&ni->runlist.lock);
			/*
			 * If buffer is outside the runlist, treat it as a
			 * hole.  This can happen due to concurrent truncate
			 * for example.
			 */
			if (err == -ENOENT || lcn == LCN_ENOENT) {
				err = 0;
				goto handle_hole;
			}
			/* Hard error, zero out region. */
			if (!err)
				err = -EIO;
			bh->b_blocknr = -1;
			SetPageError(page);
			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"offset 0x%x because its location on "
					"disk could not be determined%s "
					"(error code %i).", ni->mft_no,
					ni->type, (unsigned long long)vcn,
					vcn_ofs, is_retry ? " even after "
					"retrying" : "", err);
		}
		/*
		 * Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
		 * of the page and set the buffer uptodate.
		 */
handle_hole:
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		zero_user(page, i * blocksize, blocksize);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Check we have at least one buffer ready for i/o. */
	if (nr) {
		struct buffer_head *tbh;

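		/*
		 * Lock all the buffers first and only then submit any i/o:
		 * the completion handler may unlock the page as soon as it
		 * sees no other locked async_read buffers, so every buffer
		 * taking part must be marked before the first submission.
		 */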
		/* Lock the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/* Finally, start i/o on the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			if (likely(!buffer_uptodate(tbh)))
				submit_bh(REQ_OP_READ, 0, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
		return 0;
	}
	/* No i/o was scheduled on any of the buffers. */
	if (likely(!PageError(page)))
		SetPageUptodate(page);
	else /* Signal synchronous i/o error. */
		nr = -EIO;
	unlock_page(page);
	return nr;
}

/**
 * ntfs_readpage - fill a @page of a @file with data from the device
 * @file:	open file to which the page @page belongs or NULL
 * @page:	page cache page to fill with data
 *
 * For non-resident attributes, ntfs_readpage() fills the @page of the open
 * file @file by calling the ntfs version of the generic block_read_full_page()
 * function, ntfs_read_block(), which in turn creates and reads in the buffers
 * associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 * data from the mft record (which at this stage is most likely in memory) and
 * fills the remainder with zeroes.  Thus, in this case, I/O is synchronous, as
 * even if the mft record is not cached at this point in time, we need to wait
 * for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_readpage(struct file *file, struct page *page)
{
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	u8 *addr;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *mrec;
	unsigned long flags;
	u32 attr_len;
	int err = 0;

retry_readpage:
	BUG_ON(!PageLocked(page));
	vi = page->mapping->host;
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Read outside i_size - truncated?");
		goto done;
	}
	/*
	 * This can potentially happen because we clear PageUptodate() during
	 * ntfs_writepage() of MstProtected() attributes.
	 */
	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}
	ni = NTFS_I(vi);
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If attribute is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			BUG_ON(ni->type != AT_DATA);
			err = -EACCES;
			goto err_out;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			return ntfs_read_compressed_block(page);
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* Normal, non-resident data stream. */
		return ntfs_read_block(page);
	}
	/*
	 * Attribute is resident, implying it is not compressed or encrypted.
	 * This also means the attribute is smaller than an mft record and
	 * hence smaller than a page, so can simply zero out any pages with
	 * index above 0.  Note the attribute can actually be marked compressed
	 * but if it is resident the actual data is not compressed so we are
	 * ok to ignore the compressed flag here.
	 */
	if (unlikely(page->index > 0)) {
		zero_user(page, 0, PAGE_SIZE);
		goto done;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	mrec = map_mft_record(base_ni);
	if (IS_ERR(mrec)) {
		err = PTR_ERR(mrec);
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the readpage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_readpage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto put_unm_err_out;
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	read_lock_irqsave(&ni->size_lock, flags);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate. */
		attr_len = i_size;
	}
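	/*
	 * At this point attr_len <= PAGE_SIZE: a resident attribute value
	 * fits inside a single mft record, which is itself smaller than a
	 * page (see the comment above the page->index > 0 check), so the
	 * memset() length below cannot underflow.
	 */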
	addr = kmap_atomic(page);
	/* Copy the data to the page. */
	memcpy(addr, (u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			attr_len);
	/* Zero the remainder of the page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	flush_dcache_page(page);
	kunmap_atomic(addr);
put_unm_err_out:
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
done:
	SetPageUptodate(page);
err_out:
	unlock_page(page);
	return err;
}

#ifdef NTFS_RW

/**
 * ntfs_write_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, non-mst
 * protected attributes to their backing store.
 *
 * For a page with buffers, map and write the dirty buffers asynchronously
 * under page writeback.  For a page without buffers, create buffers for the
 * page, then proceed as above.
 *
 * If a page doesn't have buffers the page dirty state is definitive.  If a
 * page does have buffers, the page dirty state is just a hint, and the buffer
 * dirty state is definitive.  (A hint which has rules: dirty buffers against
 * a clean page is illegal.  Other combinations are legal and need to be
 * handled.  In particular a dirty page containing clean buffers for example.)
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_read_block() and __block_write_full_page().
 */
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
	VCN vcn;
	LCN lcn;
	s64 initialized_size;
	loff_t i_size;
	sector_t block, dblock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int err;
	bool need_end_writeback;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", ni->mft_no, ni->type, page->index);

	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	if (!page_has_buffers(page)) {
		BUG_ON(!PageUptodate(page));
		create_empty_buffers(page, blocksize,
				(1 << BH_Uptodate) | (1 << BH_Dirty));
		if (unlikely(!page_has_buffers(page))) {
			ntfs_warning(vol->sb, "Error allocating page "
					"buffers.  Redirtying page so we try "
					"again later.");
			/*
			 * Put the page back on mapping->dirty_pages, but leave
			 * its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);

	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(vi);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* The first out of bounds block for the data size. */
	dblock = (i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = initialized_size >> blocksize_bits;
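	/*
	 * Illustrative values: with 512-byte blocks, i_size = 10000 gives
	 * dblock = (10000 + 511) >> 9 = 20, and initialized_size = 8192
	 * gives iblock = 8192 >> 9 = 16, i.e. blocks 16-19 are inside the
	 * data size but beyond the initialized size.
	 */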

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	/*
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	rl = NULL;
	err = 0;
	do {
		bool is_retry = false;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.  The contents of such buffers
			 * were zeroed by ntfs_writepage().
			 *
			 * FIXME: What about the small race window where
			 * ntfs_writepage() has not done any clearing because
			 * the page was within i_size but before we get here,
			 * vmtruncate() modifies i_size?
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(initialized_size < i_size))) {
			/*
			 * If this page is fully outside initialized size, zero
			 * out all pages between the current initialized size
			 * and the current page.  Just use ntfs_readpage() to do
			 * the zeroing transparently.
			 */
			if (block > iblock) {
				// TODO:
				// For each page do:
				// - read_cache_page()
				// Again for each page do:
				// - wait_on_page_locked()
				// - Check (PageUptodate(page) &&
				//   !PageError(page))
				// Update initialized size in the attribute and
				// in the inode.
				// Again, for each page do:
				// - __set_page_dirty_buffers();
				// - put_page()
				// We don't need to wait on the writes.
				// Update iblock.
			}
			/*
			 * The current page straddles initialized size.  Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?).  Note, there aren't any non-uptodate buffers
			 * if the page is uptodate.
			 * FIXME: For an uptodate page, the buffers may need to
			 * be written out because they were not initialized on
			 * disk before.
			 */
			if (!PageUptodate(page)) {
				// TODO:
				// Zero any non-uptodate buffers up to i_size.
				// Set them uptodate and dirty.
			}
			// TODO:
			// Update initialized size in the attribute and in the
			// inode (up to i_size).
			// Update iblock.
			// FIXME: This is inefficient.  Try to batch the two
			// size changes to happen in one go.
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet.  Sorry.");
			err = -EOPNOTSUPP;
			break;
			// Do NOT set_buffer_new() BUT DO clear buffer range
			// outside write request range.
			// set_buffer_uptodate() on complete buffers as well as
			// set_buffer_dirty().
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits;
		vcn_ofs = vcn & vol->cluster_size_mask;
		vcn >>= vol->cluster_size_bits;
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}
		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			u8 *kaddr;
			unsigned long *bpos, *bend;

			/* Check if the buffer is zero. */
			kaddr = kmap_atomic(page);
			bpos = (unsigned long *)(kaddr + bh_offset(bh));
			bend = (unsigned long *)((u8*)bpos + blocksize);
			do {
				if (unlikely(*bpos))
					break;
			} while (likely(++bpos < bend));
			kunmap_atomic(kaddr);
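			/*
			 * The scan above advances sizeof(unsigned long) bytes
			 * at a time; blocksize is a power of two of at least
			 * 512 bytes, so it divides evenly and bpos == bend
			 * holds if and only if every word of the buffer was
			 * zero.
			 */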
			if (bpos == bend) {
				/*
				 * Buffer is zero and sparse, no need to write
				 * it.
				 */
				bh->b_blocknr = -1;
				clear_buffer_dirty(bh);
				continue;
			}
			// TODO: Instantiate the hole.
			// clear_buffer_new(bh);
			// clean_bdev_bh_alias(bh);
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet.  Sorry.");
			err = -EOPNOTSUPP;
			break;
		}
		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			rl = NULL;
		} else if (!rl)
			up_read(&ni->runlist.lock);
		/*
		 * If buffer is outside the runlist, truncate has cut it out
		 * of the runlist.  Just clean and clear the buffer and set it
		 * uptodate so it can get discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			zero_user(page, bh_offset(bh), blocksize);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
		}
		/* Failed to map the buffer, even after retrying. */
		if (!err)
			err = -EIO;
		bh->b_blocknr = -1;
		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
				"because its location on disk could not be "
				"determined%s (error code %i).", ni->mft_no,
				ni->type, (unsigned long long)vcn,
				vcn_ofs, is_retry ? " even after "
				"retrying" : "", err);
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->readpage() is not called later. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) } else if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * For the error case. The buffer may have been set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * dirty during attachment to a dirty page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (err != -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) } while ((bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) // TODO: Remove the -EOPNOTSUPP check later on...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (unlikely(err == -EOPNOTSUPP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) else if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ntfs_warning(vol->sb, "Error allocating memory. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) "Redirtying page so we try again "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) "later.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * Put the page back on mapping->dirty_pages, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * leave its buffer's dirty state as-is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) set_page_writeback(page); /* Keeps try_to_free_buffers() away. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Submit the prepared buffers for i/o. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) need_end_writeback = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct buffer_head *next = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (buffer_async_write(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) submit_bh(REQ_OP_WRITE, 0, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) need_end_writeback = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) bh = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* If no i/o was started, need to end_page_writeback(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (unlikely(need_end_writeback))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * ntfs_write_mst_block - write a @page to the backing store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * @page: page cache page to write out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @wbc: writeback control structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * This function is for writing pages belonging to non-resident, mst protected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * attributes to their backing store. The only supported attributes are index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * allocation and $MFT/$DATA. Both directory inodes and index inodes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * supported for the index allocation case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * The page must remain locked for the duration of the write because we apply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * the mst fixups, write, and then undo the fixups, so if we were to unlock the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * page before undoing the fixups, any other user of the page will see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * page contents as corrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * We clear the page uptodate flag for the duration of the function to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * exclusion for the $MFT/$DATA case against someone mapping an mft record we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * are about to apply the mst fixups to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * Return 0 on success and -errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * Based on ntfs_write_block(), ntfs_mft_writepage(), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * write_mft_record_nolock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int ntfs_write_mst_block(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) sector_t block, dblock, rec_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct inode *vi = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ntfs_inode *ni = NTFS_I(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ntfs_volume *vol = ni->vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) u8 *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) unsigned int rec_size = ni->itype.index.block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct buffer_head *bh, *head, *tbh, *rec_start_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct buffer_head *bhs[MAX_BUF_PER_PAGE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) runlist_element *rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned bh_size, rec_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) bool sync, is_mft, page_is_dirty, rec_is_dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) unsigned char bh_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) "0x%lx.", vi->i_ino, ni->type, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) BUG_ON(!NInoNonResident(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) BUG_ON(!NInoMstProtected(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * in its page cache were to be marked dirty. However this should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * never happen with the current driver and considering we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * handle this case here we do want to BUG(), at least for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bh_size = vol->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) bh_size_bits = vol->sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) max_bhs = PAGE_SIZE / bh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) BUG_ON(!max_bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Were we called for sync purposes? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) sync = (wbc->sync_mode == WB_SYNC_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* Make sure we have mapped buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) BUG_ON(!bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) rec_size_bits = ni->itype.index.block_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) BUG_ON(!(PAGE_SIZE >> rec_size_bits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) bhs_per_rec = rec_size >> bh_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) BUG_ON(!bhs_per_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* The first block in the page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) rec_block = block = (sector_t)page->index <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) (PAGE_SHIFT - bh_size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* The first out of bounds block for the data size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) rl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) page_is_dirty = rec_is_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rec_start_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) bool is_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (likely(block < rec_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (unlikely(block >= dblock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * This block is not the first one in the record. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * ignore the buffer's dirty state because we could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * have raced with a parallel mark_ntfs_record_dirty().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (!rec_is_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (unlikely(err2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (err2 != -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else /* if (block == rec_block) */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) BUG_ON(block > rec_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* This block is the first one in the record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rec_block += bhs_per_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) err2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (unlikely(block >= dblock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!buffer_dirty(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* Clean records are not written out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rec_is_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rec_is_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rec_start_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Need to map the buffer if it is not mapped already. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (unlikely(!buffer_mapped(bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) VCN vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) LCN lcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) unsigned int vcn_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) bh->b_bdev = vol->sb->s_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* Obtain the vcn and offset of the current block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) vcn = (VCN)block << bh_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) vcn_ofs = vcn & vol->cluster_size_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) vcn >>= vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!rl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) lock_retry_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) down_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rl = ni->runlist.rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (likely(rl != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* Seek to element containing target vcn. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) while (rl->length && rl[1].vcn <= vcn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) rl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) lcn = LCN_RL_NOT_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* Successful remap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (likely(lcn >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Setup buffer head to correct block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) bh->b_blocknr = ((lcn <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) vol->cluster_size_bits) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) vcn_ofs) >> bh_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) set_buffer_mapped(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * Remap failed. Retry to map the runlist once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * unless we are working on $MFT which always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * has the whole of its runlist in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!is_mft && !is_retry &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) lcn == LCN_RL_NOT_MAPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) is_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * Attempt to map runlist, dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * lock for the duration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) err2 = ntfs_map_runlist(ni, vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (likely(!err2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto lock_retry_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (err2 == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) page_is_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) lcn = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) err2 = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!rl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Hard error. Abort writing this record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!err || err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) bh->b_blocknr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ntfs_error(vol->sb, "Cannot write ntfs record "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) "0x%llx (inode 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) "attribute type 0x%x) because "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) "its location on disk could "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) "not be determined (error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "code %lli).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) (long long)block <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) bh_size_bits >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) vol->mft_record_size_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ni->mft_no, ni->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) (long long)lcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * If this is not the first buffer, remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * buffers in this record from the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * buffers to write and clear their dirty bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * if not error -ENOMEM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (rec_start_bh != bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) while (bhs[--nr_bhs] != rec_start_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (err2 != -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) clear_buffer_dirty(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rec_start_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) } while ((rec_start_bh =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) rec_start_bh->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) b_this_page) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) BUG_ON(!buffer_uptodate(bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) BUG_ON(nr_bhs >= max_bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) bhs[nr_bhs++] = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } while (block++, (bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (unlikely(rl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* If there were no dirty buffers, we are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!nr_bhs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* Map the page so we can access its contents. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Clear the page uptodate flag whilst the mst fixups are applied. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) BUG_ON(!PageUptodate(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) unsigned int ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Skip buffers which are not at the beginning of records. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (i % bhs_per_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) ofs = bh_offset(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (is_mft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ntfs_inode *tni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned long mft_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /* Get the mft record number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) >> rec_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Check whether to write this mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) tni = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!ntfs_may_write_mft_record(vol, mft_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) (MFT_RECORD*)(kaddr + ofs), &tni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * The record should not be written. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * means we need to redirty the page before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) page_is_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * Remove the buffers in this mft record from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * the list of buffers to write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) bhs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) } while (++i % bhs_per_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * The record should be written. If a locked ntfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * inode was returned, add it to the array of locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * ntfs inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (tni)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) locked_nis[nr_locked_nis++] = tni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* Apply the mst protection fixups. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) rec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (unlikely(err2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (!err || err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ntfs_error(vol->sb, "Failed to apply mst fixups "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) "(inode 0x%lx, attribute type 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) "page index 0x%lx, page offset 0x%x)!"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) " Unmount and run chkdsk.", vi->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ni->type, page->index, ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Mark all the buffers in this record clean as we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * not want to write corrupt data to disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) clear_buffer_dirty(bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) bhs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } while (++i % bhs_per_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) nr_recs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* If no records are to be written out, we are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!nr_recs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto unm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Lock buffers and start synchronous write i/o on them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (!tbh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!trylock_buffer(tbh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* The buffer dirty state is now irrelevant, just clean it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) clear_buffer_dirty(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) BUG_ON(!buffer_uptodate(tbh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) BUG_ON(!buffer_mapped(tbh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) get_bh(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) tbh->b_end_io = end_buffer_write_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) submit_bh(REQ_OP_WRITE, 0, tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* Synchronize the mft mirror now if not @sync. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (is_mft && !sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) goto do_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) do_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Wait on i/o completion of buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (!tbh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) wait_on_buffer(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (unlikely(!buffer_uptodate(tbh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) ntfs_error(vol->sb, "I/O error while writing ntfs "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) "record buffer (inode 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) "attribute type 0x%x, page index "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) "0x%lx, page offset 0x%lx)! Unmount "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) "and run chkdsk.", vi->i_ino, ni->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) page->index, bh_offset(tbh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (!err || err == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * Set the buffer uptodate so the page and buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * states do not become out of sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) set_buffer_uptodate(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* If @sync, now synchronize the mft mirror. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (is_mft && sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) do_mirror:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unsigned long mft_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) unsigned int ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Skip buffers which are not at the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (i % bhs_per_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* Skip removed buffers (and hence records). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!tbh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ofs = bh_offset(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Get the mft record number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) >> rec_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (mft_no < vol->mftmirr_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ntfs_sync_mft_mirror(vol, mft_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) (MFT_RECORD*)(kaddr + ofs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto do_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* Remove the mst protection fixups again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (!(i % bhs_per_rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!tbh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) post_write_mst_fixup((NTFS_RECORD*)(kaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) bh_offset(tbh)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) unm_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* Unlock any locked inodes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) while (nr_locked_nis-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ntfs_inode *tni, *base_tni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) tni = locked_nis[nr_locked_nis];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Get the base inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) mutex_lock(&tni->extent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (tni->nr_extents >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) base_tni = tni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) base_tni = tni->ext.base_ntfs_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) BUG_ON(!base_tni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) mutex_unlock(&tni->extent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ntfs_debug("Unlocking %s inode 0x%lx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) tni == base_tni ? "base" : "extent",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) tni->mft_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) mutex_unlock(&tni->mrec_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) atomic_dec(&tni->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) iput(VFS_I(base_tni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (unlikely(err && err != -ENOMEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * Set page error if there is only one ntfs record in the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Otherwise we would loose per-record granularity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (ni->itype.index.block_size == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (page_is_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) ntfs_debug("Page still contains one or more dirty ntfs "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) "records. Redirtying the page starting at "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) "record 0x%lx.", page->index <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) (PAGE_SHIFT - rec_size_bits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * Keep the VM happy. This must be done otherwise the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * the page is clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (likely(!err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * ntfs_writepage - write a @page to the backing store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * @page: page cache page to write out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * @wbc: writeback control structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * This is called from the VM when it wants to have a dirty ntfs page cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * page cleaned. The VM has already locked the page and marked it clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * For non-resident attributes, ntfs_writepage() writes the @page by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * the ntfs version of the generic block_write_full_page() function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * ntfs_write_block(), which in turn if necessary creates and writes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * buffers associated with the page asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * the data to the mft record (which at this stage is most likely in memory).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * The mft record is then marked dirty and written out asynchronously via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * vfs inode dirty code path for the inode the mft record belongs to or via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * vm page dirty code path for the page the mft record is in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * Return 0 on success and -errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct inode *vi = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) char *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ntfs_attr_search_ctx *ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) MFT_RECORD *m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) u32 attr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) retry_writepage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) BUG_ON(!PageLocked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) i_size = i_size_read(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* Is the page fully outside i_size? (truncate in progress) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) PAGE_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * The page may have dirty, unmapped buffers. Make them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * freeable here, so the page does not leak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) block_invalidatepage(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ntfs_debug("Write outside i_size - truncated?");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * Only $DATA attributes can be encrypted and only unnamed $DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * attributes can be compressed. Index root can have the flags set but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * this means to create compressed/encrypted files, not that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * attribute is compressed/encrypted. Note we need to check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * AT_INDEX_ALLOCATION since this is the type of both directory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * index inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (ni->type != AT_INDEX_ALLOCATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* If file is encrypted, deny access, just like NT4. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (NInoEncrypted(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) BUG_ON(ni->type != AT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ntfs_debug("Denying write access to encrypted file.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* Compressed data streams are handled in compress.c. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (NInoNonResident(ni) && NInoCompressed(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) BUG_ON(ni->type != AT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) BUG_ON(ni->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) // TODO: Implement and replace this with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) // return ntfs_write_compressed_block(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) ntfs_error(vi->i_sb, "Writing to compressed files is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) "not supported yet. Sorry.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) // TODO: Implement and remove this check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (NInoNonResident(ni) && NInoSparse(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ntfs_error(vi->i_sb, "Writing to sparse files is not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) "supported yet. Sorry.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* NInoNonResident() == NInoIndexAllocPresent() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (NInoNonResident(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* We have to zero every time due to mmap-at-end-of-file. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (page->index >= (i_size >> PAGE_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* The page straddles i_size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) unsigned int ofs = i_size & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) zero_user_segment(page, ofs, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* Handle mst protected attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (NInoMstProtected(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return ntfs_write_mst_block(page, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Normal, non-resident data stream. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return ntfs_write_block(page, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * Attribute is resident, implying it is not compressed, encrypted, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * mst protected. This also means the attribute is smaller than an mft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * record and hence smaller than a page, so can simply return error on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * any pages with index above 0. Note the attribute can actually be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * marked compressed but if it is resident the actual data is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * compressed so we are ok to ignore the compressed flag here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) BUG_ON(page_has_buffers(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) BUG_ON(!PageUptodate(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (unlikely(page->index > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) "Aborting write.", page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (!NInoAttr(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) base_ni = ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) base_ni = ni->ext.base_ntfs_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* Map, pin, and lock the mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) m = map_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (IS_ERR(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) err = PTR_ERR(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * If a parallel write made the attribute non-resident, drop the mft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * record and retry the writepage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (unlikely(NInoNonResident(ni))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto retry_writepage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ctx = ntfs_attr_get_search_ctx(base_ni, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (unlikely(!ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) CASE_SENSITIVE, 0, NULL, 0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * Keep the VM happy. This must be done otherwise the radix-tree tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) i_size = i_size_read(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (unlikely(attr_len > i_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /* Race with shrinking truncate or a failed truncate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) attr_len = i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * If the truncate failed, fix it up now. If a concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * truncate, we do its job, so it does not have to do anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* Shrinking cannot fail. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) BUG_ON(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) addr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* Copy the data from the page to the mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) memcpy((u8*)ctx->attr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) le16_to_cpu(ctx->attr->data.resident.value_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) addr, attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /* Zero out of bounds area in the page cache page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* We are done with the page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Finally, mark the mft record dirty, so it gets written back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) "page so we try again later.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * Put the page back on mapping->dirty_pages, but leave its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * buffers' dirty state as-is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ntfs_error(vi->i_sb, "Resident attribute write failed with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) "error %i.", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) NVolSetErrors(ni->vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * ntfs_bmap - map logical file block to physical device block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * @mapping: address space mapping to which the block to be mapped belongs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * @block: logical block to map to its physical device block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * For regular, non-resident files (i.e. not compressed and not encrypted), map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * the logical @block belonging to the file described by the address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * mapping @mapping to its physical device block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * The size of the block is equal to the @s_blocksize field of the super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * of the mounted file system which is guaranteed to be smaller than or equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * to the cluster size thus the block is guaranteed to fit entirely inside the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * cluster which means we do not need to care how many contiguous bytes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * available after the beginning of the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * Return the physical device block if the mapping succeeded or 0 if the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * is sparse or there was an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * Note: This is a problem if someone tries to run bmap() on $Boot system file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * as that really is in block zero but there is nothing we can do. bmap() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * just broken in that respect (just like it cannot distinguish sparse from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * not available or error).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) s64 ofs, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) LCN lcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) unsigned long blocksize, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ntfs_inode *ni = NTFS_I(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ntfs_volume *vol = ni->vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) unsigned delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) unsigned char blocksize_bits, cluster_size_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) ni->mft_no, (unsigned long long)block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ntfs_error(vol->sb, "BMAP does not make sense for %s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) "attributes, returning 0.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) (ni->type != AT_DATA) ? "non-data" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) (!NInoNonResident(ni) ? "resident" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) "encrypted"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* None of these can happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) BUG_ON(NInoCompressed(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) BUG_ON(NInoMstProtected(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) blocksize = vol->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) blocksize_bits = vol->sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ofs = (s64)block << blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) i_size = i_size_read(VFS_I(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) read_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * If the offset is beyond the initialized size, or the block
	 * straddles the initialized size while the initialized size is still
	 * below the file size, pretend the block is a hole.
	 */
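	/*
	 * For example (illustrative numbers, not from any particular
	 * volume): with a 512-byte block size, initialized_size 1000 and
	 * i_size 2048, block 1 (ofs 512) straddles the initialized size
	 * (512 + 512 > 1000 while 1000 < 2048) and is treated as a hole.
	 */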
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) goto hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) cluster_size_shift = vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) down_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (unlikely(lcn < LCN_HOLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * Step down to an integer to avoid gcc doing a long long
		 * comparison in the switch when we know @lcn is between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * LCN_HOLE and LCN_EIO (i.e. -1 to -5).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * Otherwise older gcc (at least on some architectures) will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * try to use __cmpdi2() which is of course not available in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) switch ((int)lcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) case LCN_ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * If the offset is out of bounds then pretend it is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) goto hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) case LCN_ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ntfs_error(vol->sb, "Not enough memory to complete "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) "mapping for inode 0x%lx. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) "Returning 0.", ni->mft_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ntfs_error(vol->sb, "Failed to complete mapping for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) "inode 0x%lx. Run chkdsk. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) "Returning 0.", ni->mft_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (lcn < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* It is a hole. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) hole:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) ntfs_debug("Done (returning hole).");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /*
	 * The block is really allocated and fulfils all our criteria.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * Convert the cluster to units of block size and return the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) */
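	/*
	 * Worked example (illustrative numbers, not from any particular
	 * volume): with 4096-byte clusters (cluster_size_shift = 12),
	 * 512-byte blocks (blocksize_bits = 9), lcn = 0x10 and ofs = 0x5200,
	 * delta = 0x5200 & 0xfff = 0x200 and
	 * block = ((0x10 << 12) + 0x200) >> 9 = 0x10200 >> 9 = 0x81.
	 */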
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) delta = ofs & vol->cluster_size_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (unlikely(sizeof(block) < sizeof(lcn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) block = lcn = ((lcn << cluster_size_shift) + delta) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* If the block number was truncated return 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (unlikely(block != lcn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ntfs_error(vol->sb, "Physical block 0x%llx is too "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) "large to be returned, returning 0.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) (long long)lcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) block = ((lcn << cluster_size_shift) + delta) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) blocksize_bits;
	ntfs_debug("Done (returning block 0x%llx).",
			(unsigned long long)block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
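
/*
 * Illustrative userspace view of ntfs_bmap() (a sketch, not part of this
 * driver): the VFS routes the FIBMAP ioctl to the ->bmap method, so the
 * mapping can be queried like this, with the block number passed in and
 * returned through an int and the caller holding CAP_SYS_RAWIO:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int blk = 0;	// logical block 0 of the file open on fd
 *	if (!ioctl(fd, FIBMAP, &blk) && blk)
 *		...	// blk is the physical device block; 0 means a hole,
 *			// an unmapped block, or an error.
 */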
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * ntfs_normal_aops - address space operations for normal inodes and attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * Note these are not used for compressed or mst protected inodes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) const struct address_space_operations ntfs_normal_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) .readpage = ntfs_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) .writepage = ntfs_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) .set_page_dirty = __set_page_dirty_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) .bmap = ntfs_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .is_partially_uptodate = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
/**
 * ntfs_compressed_aops - address space operations for compressed inodes
 *
 * These are identical to ntfs_normal_aops except that ->bmap is not
 * implemented, since mapping logical blocks to device blocks is not
 * meaningful for compressed attributes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) const struct address_space_operations ntfs_compressed_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) .readpage = ntfs_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) .writepage = ntfs_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) .set_page_dirty = __set_page_dirty_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) .is_partially_uptodate = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /**
 * ntfs_mst_aops - general address space operations for mst protected inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * and attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) const struct address_space_operations ntfs_mst_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) .readpage = ntfs_readpage, /* Fill page with data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) .writepage = ntfs_writepage, /* Write dirty page to disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) without touching the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) belonging to the page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) .is_partially_uptodate = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) };
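
/*
 * A minimal sketch of how an inode ends up with one of the three tables
 * above, keyed on the same flags the tables are documented with (the actual
 * selection is done by the inode setup code, not in this file):
 *
 *	if (NInoMstProtected(ni))
 *		vi->i_mapping->a_ops = &ntfs_mst_aops;
 *	else if (NInoCompressed(ni))
 *		vi->i_mapping->a_ops = &ntfs_compressed_aops;
 *	else
 *		vi->i_mapping->a_ops = &ntfs_normal_aops;
 */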
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * mark_ntfs_record_dirty - mark an ntfs record dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * @page: page containing the ntfs record to mark dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * @ofs: byte offset within @page at which the ntfs record begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * Set the buffers and the page in which the ntfs record is located dirty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * The latter also marks the vfs inode the ntfs record belongs to dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * (I_DIRTY_PAGES only).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * If the page does not have buffers, we create them and set them uptodate.
 * The page may not be locked, which is why we need to handle the buffers under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * the mapping->private_lock. Once the buffers are marked dirty we no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * need the lock since try_to_free_buffers() does not free dirty buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) */
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct address_space *mapping = page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ntfs_inode *ni = NTFS_I(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct buffer_head *bh, *head, *buffers_to_free = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) unsigned int end, bh_size, bh_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) BUG_ON(!PageUptodate(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) end = ofs + ni->itype.index.block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) bh_size = VFS_I(ni)->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) spin_lock(&mapping->private_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (unlikely(!page_has_buffers(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) spin_unlock(&mapping->private_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) bh = head = alloc_page_buffers(page, bh_size, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) spin_lock(&mapping->private_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (likely(!page_has_buffers(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct buffer_head *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
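			/*
			 * alloc_page_buffers() returns a NULL-terminated
			 * chain.  The page is known to be uptodate, so mark
			 * each buffer uptodate as well, then close the chain
			 * into the circular list that page_buffers() expects.
			 */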
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) tail = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) } while (bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) tail->b_this_page = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) attach_page_private(page, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) buffers_to_free = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) BUG_ON(!bh);
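	/* Dirty only the buffers that overlap the record at [ofs, end). */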
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) bh_ofs = bh_offset(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (bh_ofs + bh_size <= ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (unlikely(bh_ofs >= end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) set_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) } while ((bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) spin_unlock(&mapping->private_lock);
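	/*
	 * Dirty the page itself without touching its buffers: only the
	 * buffers backing this record were dirtied above.
	 */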
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (unlikely(buffers_to_free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) bh = buffers_to_free->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) free_buffer_head(buffers_to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) buffers_to_free = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) } while (buffers_to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
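
/*
 * Illustrative caller (a sketch modelled on the index record handling; the
 * @ia and @page names are assumptions): after modifying an index allocation
 * record @ia that lives in the uptodate @page, the record is redirtied by
 * its byte offset inside the page:
 *
 *	mark_ntfs_record_dirty(page, (u8*)ia - (u8*)page_address(page));
 */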
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) #endif /* NTFS_RW */