// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <linux/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we do not run into problems with
 * existing files that are already too large. It would be better to allow the
 * user to read the beginning of the file, but I doubt very much that anyone is
 * going to hit this check on a 32-bit architecture, so there is no point in
 * adding the extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
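	/*
	 * Illustrative numbers (assuming 4 KiB pages): with a 32-bit
	 * unsigned long the page cache index can address at most 2^32
	 * pages, i.e. 16 TiB of file data, and MAX_LFS_FILESIZE is defined
	 * by the VFS to stay within that limit, so rejecting larger files
	 * above keeps the page index from overflowing.
	 */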
	return generic_file_open(vi, filp);
}

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes. This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all. This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped. And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it. And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all. For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error. In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
 * held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_SHIFT;
	end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
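	/*
	 * Worked example (illustrative only, assuming PAGE_SIZE == 4096):
	 * old_init_size == 0x1800 and new_init_size == 0x5000 give
	 * index == 1 and end_index == 5, so the loop below reads and
	 * dirties pages 1 to 4 inclusive.
	 */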
	do {
		/*
		 * Read the page. If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			put_page(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode. This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		put_page(page);
		/*
		 * Play nice with the vm and the rest of the system. This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages we could optimize this workload by
		 * using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist. This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}

static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
		struct iov_iter *from)
{
	loff_t pos;
	s64 end, ll;
	ssize_t err;
	unsigned long flags;
	struct file *file = iocb->ki_filp;
	struct inode *vi = file_inode(file);
	ntfs_inode *base_ni, *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%zx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)iocb->ki_pos,
			iov_iter_count(from));
	err = generic_write_checks(iocb, from);
	if (unlikely(err <= 0))
		goto out;
	/*
	 * All checks have passed. Before we start doing any writing we want
	 * to abort any totally illegal writes.
	 */
	BUG_ON(NInoMstProtected(ni));
	BUG_ON(ni->type != AT_DATA);
	/* If file is encrypted, deny access, just like NT4. */
	if (NInoEncrypted(ni)) {
		/* Only $DATA attributes can be encrypted. */
		/*
		 * Reminder for later: Encrypted files are _always_
		 * non-resident so that the content can always be encrypted.
		 */
		ntfs_debug("Denying write access to encrypted file.");
		err = -EACCES;
		goto out;
	}
	if (NInoCompressed(ni)) {
		/* Only unnamed $DATA attribute can be compressed. */
		BUG_ON(ni->name_len);
		/*
		 * Reminder for later: If resident, the data is not actually
		 * compressed. Only on the switch to non-resident does
		 * compression kick in. This is in contrast to encrypted files
		 * (see above).
		 */
		ntfs_error(vi->i_sb, "Writing to compressed files is not "
				"implemented yet. Sorry.");
		err = -EOPNOTSUPP;
		goto out;
	}
	base_ni = ni;
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	err = file_remove_privs(file);
	if (unlikely(err))
		goto out;
	/*
	 * Our ->update_time method always succeeds, thus file_update_time()
	 * cannot fail either, so there is no need to check the return code.
	 */
	file_update_time(file);
	pos = iocb->ki_pos;
	/* The first byte after the last cluster being written to. */
	end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
			~(u64)vol->cluster_size_mask;
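	/*
	 * Worked example (illustrative only, assuming a 4096 byte cluster
	 * size, i.e. cluster_size_mask == 0xfff): pos == 0x1400 and a count
	 * of 0x200 bytes give end == (0x1600 + 0xfff) & ~0xfff == 0x2000,
	 * i.e. the write is rounded up to the end of cluster 1.
	 */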
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/*
		 * Extend the allocation without changing the data size.
		 *
		 * Note we ensure the allocation is big enough to at least
		 * write some data but we do not require the allocation to be
		 * complete, i.e. it may be partial.
		 */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				iov_iter_truncate(from, ll - pos);
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error %d).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type),
						(int)-err);
				iov_iter_truncate(from, ll - pos);
			} else {
				if (err != -ENOSPC)
					ntfs_error(vi->i_sb, "Cannot perform "
							"write to inode "
							"0x%lx, attribute "
							"type 0x%x, because "
							"extending the "
							"allocation failed "
							"(error %ld).",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type),
							(long)-err);
				else
					ntfs_debug("Cannot perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because there is no "
							"space left.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
				goto out;
			}
		}
	}
	/*
	 * If the write starts beyond the initialized size, extend it up to the
	 * beginning of the write and initialize all non-sparse space between
	 * the old initialized size and the new one. This automatically also
	 * increments the vfs inode->i_size to keep it above or equal to the
	 * initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		/*
		 * Wait for ongoing direct i/o to complete before proceeding.
		 * New direct i/o cannot start as we hold i_mutex.
		 */
		inode_dio_wait(vi);
		err = ntfs_attr_extend_initialized(ni, pos);
		if (unlikely(err < 0))
			ntfs_error(vi->i_sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error %d).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type),
					(int)-err);
	}
out:
	return err;
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, add it to the LRU list.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
				FGP_ACCESSED);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping,
					index,
					mapping_gfp_constraint(mapping, GFP_KERNEL));
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		put_page(pages[nr]);
	}
	goto out;
}

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(REQ_OP_READ, 0, bh);
}
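/*
 * Note that ntfs_submit_bh_for_read() only submits the read; completion is
 * signalled through end_buffer_read_sync().  The callers below collect the
 * submitted buffer heads in a wait array and wait for the i/o to complete
 * before copying new data into the page.
 */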

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
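	/*
	 * Illustrative example: with 512 byte device blocks and 4 KiB pages
	 * each page gets eight buffer heads, so the per-buffer loop further
	 * below walks eight buffers per page.
	 */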
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
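	/*
	 * Worked example (illustrative only, 4096 byte clusters): pos ==
	 * 0x1100 and bytes == 0x300 give cpos == 1, end == 0x1400 and
	 * cend == 2, i.e. only cluster 1 is touched by the write.
	 */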
	/*
	 * Loop over each page and for each page over each buffer. Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped. If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate. If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate. If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again. The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
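		/*
		 * Example (illustrative only, 4 KiB clusters and 512 byte
		 * blocks, i.e. cluster_size_bits == 12 and blocksize_bits ==
		 * 9): each cached cluster spans eight blocks, so b_blocknr
		 * below works out to lcn_block + cdelta * 8 plus the block
		 * offset of bh_cofs within the cluster.
		 */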
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) cdelta = bh_cpos - vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) map_buffer_cached:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) BUG_ON(lcn < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) bh->b_blocknr = lcn_block +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) (cdelta << (vol->cluster_size_bits -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) blocksize_bits)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) (bh_cofs >> blocksize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) set_buffer_mapped(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * If the page is uptodate so is the buffer. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * buffer is fully outside the write, we ignore it if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * it was already allocated and we mark it dirty so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * gets written out if we allocated it. On the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * hand, if we allocated the buffer but we are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * marking it dirty we set buffer_new so we can do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * error recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (unlikely(was_hole)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* We allocated the buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) clean_bdev_bh_alias(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (bh_end <= pos || bh_pos >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) set_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* Page is _not_ uptodate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (likely(!was_hole)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * Buffer was already allocated. If it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * uptodate and is only partially being written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * to, we need to read it in before the write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * i.e. now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!buffer_uptodate(bh) && bh_pos < end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) bh_end > pos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) (bh_pos < pos ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) bh_end > end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * If the buffer is fully or partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * within the initialized size, do an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * actual read. Otherwise, simply zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) read_lock_irqsave(&ni->size_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) initialized_size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) read_unlock_irqrestore(&ni->size_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (bh_pos < initialized_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ntfs_submit_bh_for_read(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) *wait_bh++ = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) zero_user(page, bh_offset(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* We allocated the buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) clean_bdev_bh_alias(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * If the buffer is fully outside the write, zero it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * set it uptodate, and mark it dirty so it gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * written out. If it is partially being written to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * zero the region surrounding the write but leave it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * to commit write to do anything else. Finally, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * buffer is fully being overwritten, do nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (bh_end <= pos || bh_pos >= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) zero_user(page, bh_offset(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) set_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (!buffer_uptodate(bh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) (bh_pos < pos || bh_end > end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) u8 *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) unsigned pofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (bh_pos < pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) pofs = bh_pos & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) memset(kaddr + pofs, 0, pos - bh_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (bh_end > end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) pofs = end & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) memset(kaddr + pofs, 0, bh_end - end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * Slow path: this is the first buffer in the cluster. If it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * is outside the allocated size and is not uptodate, zero it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * set it uptodate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
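/*
 * Note, ni->allocated_size is read into the local variable
 * initialized_size below; the variable is simply being reused.
 */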
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) initialized_size = ni->allocated_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) read_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (bh_pos > initialized_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) } else if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) zero_user(page, bh_offset(bh), blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) is_retry = false;
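/*
 * Look the cluster up in the runlist, taking the runlist lock for
 * reading on first use.
 */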
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (!rl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) down_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) retry_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rl = ni->runlist.rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (likely(rl != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Seek to element containing target cluster. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) while (rl->length && rl[1].vcn <= bh_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) rl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (likely(lcn >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * Successful remap, setup the map cache and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * use that to deal with the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) was_hole = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) vcn = bh_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) vcn_len = rl[1].vcn - vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) lcn_block = lcn << (vol->cluster_size_bits -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) blocksize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cdelta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * If the number of remaining clusters touched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * by the write is less than or equal to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * number of cached clusters, unlock the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * runlist as the map cache will be used from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * now on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (likely(vcn + vcn_len >= cend)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (rl_write_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) up_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) rl_write_locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) rl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto map_buffer_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) lcn = LCN_RL_NOT_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * If it is not a hole and not out of bounds, the runlist is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * probably unmapped so try to map it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* Attempt to map runlist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!rl_write_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * We need the runlist locked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * writing, so if it is locked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * reading relock it now and retry in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * case it changed whilst we dropped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) down_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) rl_write_locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) goto retry_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) err = ntfs_map_runlist_nolock(ni, bh_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) is_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto retry_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * If @vcn is out of bounds, pretend @lcn is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * LCN_ENOENT. As long as the buffer is out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * of bounds this will work fine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) lcn = LCN_ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto rl_not_mapped_enoent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Failed to map the buffer, even after retrying. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) bh->b_blocknr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) "attribute type 0x%x, vcn 0x%llx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) "vcn offset 0x%x, because its "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) "location on disk could not be "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) "determined%s (error code %i).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ni->mft_no, ni->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) (unsigned long long)bh_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (unsigned)bh_pos &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) vol->cluster_size_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) is_retry ? " even after retrying" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rl_not_mapped_enoent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * The buffer is in a hole or out of bounds. We need to fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * the hole, unless the buffer is in a cluster which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * touched by the write, in which case we just leave the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * unmapped. This can only happen when the cluster size is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * less than the page cache size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (unlikely(vol->cluster_size < PAGE_SIZE)) {
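/* Round the end of the buffer up to units of clusters. */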
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bh_cend = (bh_end + vol->cluster_size - 1) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if ((bh_cend <= cpos || bh_cpos >= cend)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) bh->b_blocknr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * If the buffer is uptodate we skip it. If it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * is not but the page is uptodate, we can set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * the buffer uptodate. If the page is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * uptodate, we can clear the buffer and set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * uptodate. Whether this is worthwhile is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * debatable and this could be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) } else if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) zero_user(page, bh_offset(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * A buffer that is still out of bounds (LCN_ENOENT) here was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * really out of bounds, which is a bug; only a hole is valid here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) BUG_ON(lcn != LCN_HOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * We need the runlist locked for writing, so if it is locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * for reading relock it now and retry in case it changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * whilst we dropped the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) BUG_ON(!rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!rl_write_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) down_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) rl_write_locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto retry_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* Find the previous last allocated cluster. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) BUG_ON(rl->lcn != LCN_HOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) lcn = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rl2 = rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) while (--rl2 >= ni->runlist.rl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (rl2->lcn >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) lcn = rl2->lcn + rl2->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
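/*
 * Allocate a single cluster to fill the hole, passing the lcn just
 * past the previous allocated run (or -1 if there is none) as the
 * position to start the search at, so the new cluster stays close to
 * the file's existing data.
 */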
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (IS_ERR(rl2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) err = PTR_ERR(rl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ntfs_debug("Failed to allocate cluster, error code %i.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) lcn = rl2->lcn;
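/* Merge the new single-cluster run into the inode's runlist. */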
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (IS_ERR(rl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) err = PTR_ERR(rl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (err != -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ntfs_cluster_free_from_rl(vol, rl2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ntfs_error(vol->sb, "Failed to release "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "allocated cluster in error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) "code path. Run chkdsk to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) "recover the lost cluster.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ntfs_free(rl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ni->runlist.rl = rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) status.runlist_merged = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ntfs_debug("Allocated cluster, lcn 0x%llx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) (unsigned long long)lcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* Map and lock the mft record and get the attribute record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!NInoAttr(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) base_ni = ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) base_ni = ni->ext.base_ntfs_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) m = map_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (IS_ERR(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) err = PTR_ERR(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ctx = ntfs_attr_get_search_ctx(base_ni, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (unlikely(!ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) status.mft_attr_mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (err == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) m = ctx->mrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) a = ctx->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Find the runlist element with which the attribute extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * starts. Note, we cannot use the _attr_ version because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * have mapped the mft record. That is ok because we know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * runlist fragment must be mapped already to have ever gotten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * here, so we can just use the _rl_ version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) BUG_ON(!rl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) BUG_ON(!rl2->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) BUG_ON(rl2->lcn < LCN_HOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * If @highest_vcn is zero, calculate the real highest_vcn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * (which can really be zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!highest_vcn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) highest_vcn = (sle64_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) a->data.non_resident.allocated_size) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) vol->cluster_size_bits) - 1;
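/*
 * E.g. an attribute with a single allocated cluster has an allocated
 * size of one cluster, giving a highest_vcn of zero.
 */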
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * Determine the size of the mapping pairs array for the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * extent, i.e. the old extent with the hole filled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) highest_vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (unlikely(mp_size <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) err = mp_size ? mp_size : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ntfs_debug("Failed to get size for mapping pairs "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) "array, error code %i.", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * Resize the attribute record to fit the new mapping pairs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) attr_rec_len = le32_to_cpu(a->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) a->data.non_resident.mapping_pairs_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) BUG_ON(err != -ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) // TODO: Deal with this by using the current attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) // and filling it with as much of the mapping pairs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) // array as possible. Then loop over each attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) // extent rewriting the mapping pairs arrays as we go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) // along, and if we do not have enough space when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) // reach the end, try to resize the last attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) // extent, and if even that fails, add a new attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) // extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) // We could also try to resize at each step in the hope
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) // that we will not need to rewrite every single extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) // Note, we may need to decompress some extents to fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) // the runlist as we are walking the extents...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ntfs_error(vol->sb, "Not enough space in the mft "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) "record for the extended attribute "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) "record. This case is not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) "implemented yet.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) status.mp_rebuilt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * Generate the mapping pairs array directly into the attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) a->data.non_resident.mapping_pairs_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mp_size, rl2, vcn, highest_vcn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) "attribute type 0x%x, because building "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) "the mapping pairs failed with error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) "code %i.", vi->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) (unsigned)le32_to_cpu(ni->type), err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /* Update the highest_vcn but only if it was not set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (unlikely(!a->data.non_resident.highest_vcn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) a->data.non_resident.highest_vcn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cpu_to_sle64(highest_vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * If the attribute is sparse/compressed, update the compressed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * size in the ntfs_inode structure and the attribute record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * If we are not in the first attribute extent, switch to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * the first one (which holds the attribute sizes), but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * first ensure the changes so far will make it to disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (a->data.non_resident.lowest_vcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ntfs_attr_reinit_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) err = ntfs_attr_lookup(ni->type, ni->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ni->name_len, CASE_SENSITIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 0, NULL, 0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) status.attr_switched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* @m is not used any more so do not set it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) a = ctx->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) write_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ni->itype.compressed.size += vol->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) a->data.non_resident.compressed_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) cpu_to_sle64(ni->itype.compressed.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) write_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* Ensure the changes make it to disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Successfully filled the hole. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) status.runlist_merged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) status.mft_attr_mapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) status.mp_rebuilt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* Setup the map cache and use that to deal with the buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) was_hole = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) vcn = bh_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) vcn_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cdelta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * If the number of remaining clusters in @pages is less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * or equal to the number of cached clusters, unlock the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * runlist as the map cache will be used from now on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (likely(vcn + vcn_len >= cend)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) up_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) rl_write_locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) rl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto map_buffer_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* If there are no errors, do the next page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (likely(!err && ++u < nr_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) goto do_next_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* If there are no errors, release the runlist lock if we took it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (unlikely(rl_write_locked)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) up_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) rl_write_locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) } else if (unlikely(rl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) rl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* If we issued read requests, let them complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) initialized_size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) read_unlock_irqrestore(&ni->size_lock, flags);
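/*
 * Note, data on disk beyond the initialized size is stale and must
 * not be exposed, so any freshly read buffer that straddles the
 * initialized size has its overflowing part zeroed below.
 */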
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) while (wait_bh > wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) bh = *--wait_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) wait_on_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (likely(buffer_uptodate(bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) page = bh->b_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) bh_pos = ((s64)page->index << PAGE_SHIFT) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) bh_offset(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * If the buffer overflows the initialized size, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * need to zero the overflowing region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (unlikely(bh_pos + blocksize > initialized_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (likely(bh_pos < initialized_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ofs = initialized_size - bh_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) zero_user_segment(page, bh_offset(bh) + ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) } else /* if (unlikely(!buffer_uptodate(bh))) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* Clear buffer_new on all buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) bh = head = page_buffers(pages[u]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (buffer_new(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) clear_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) } while ((bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) } while (++u < nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (status.attr_switched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Get back to the attribute extent we modified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ntfs_attr_reinit_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ntfs_error(vol->sb, "Failed to find required "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) "attribute extent of attribute in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) "error code path. Run chkdsk to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) "recover.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) write_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ni->itype.compressed.size += vol->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) write_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * The only thing that is now wrong is the compressed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * size of the base attribute extent which chkdsk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * should be able to fix.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) m = ctx->mrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) a = ctx->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) status.attr_switched = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * If the runlist has been modified, we need to restore it by punching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * a hole into it, and we then need to deallocate the on-disk cluster as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * well. Note, we only modify the runlist if we are able to generate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * new mapping pairs array, i.e. only when the mapped attribute extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * is not switched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (status.runlist_merged && !status.attr_switched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) BUG_ON(!rl_write_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* Make the file cluster we allocated sparse in the runlist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ntfs_error(vol->sb, "Failed to punch hole into "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) "attribute runlist in error code "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) "path. Run chkdsk to recover the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) "lost cluster.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) } else /* if (success) */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) status.runlist_merged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * Deallocate the on-disk cluster we allocated but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * if we succeeded in punching its vcn out of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * runlist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) down_write(&vol->lcnbmp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ntfs_error(vol->sb, "Failed to release "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) "allocated cluster in error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) "code path. Run chkdsk to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) "recover the lost cluster.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) up_write(&vol->lcnbmp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Resize the attribute record to its old size and rebuild the mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * pairs array. Note, we can only do this if the runlist has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * restored to its old state which also implies that the mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * attribute extent is not switched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (status.mp_rebuilt && !status.runlist_merged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ntfs_error(vol->sb, "Failed to restore attribute "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) "record in error code path. Run "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) "chkdsk to recover.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) } else /* if (success) */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (ntfs_mapping_pairs_build(vol, (u8*)a +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) le16_to_cpu(a->data.non_resident.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mapping_pairs_offset), attr_rec_len -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) le16_to_cpu(a->data.non_resident.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) mapping_pairs_offset), ni->runlist.rl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) vcn, highest_vcn, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ntfs_error(vol->sb, "Failed to restore "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) "mapping pairs array in error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) "code path. Run chkdsk to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) "recover.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) NVolSetErrors(vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* Release the mft record and the attribute. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (status.mft_attr_mapped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Release the runlist lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (rl_write_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) up_write(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) else if (rl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Zero out any newly allocated blocks to avoid exposing stale data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * If BH_New is set, we know that the block was newly allocated above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * and that it has not been fully zeroed and marked dirty yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) nr_pages = u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) u = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) end = bh_cpos << vol->cluster_size_bits;
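/*
 * Walk all the pages processed so far; nr_pages is now the index of
 * the page in which the error occurred and end is the byte offset of
 * the cluster that could not be handled, so in that last page we stop
 * once we reach it.
 */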
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) page = pages[u];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (u == nr_pages &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ((s64)page->index << PAGE_SHIFT) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) bh_offset(bh) >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (!buffer_new(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) clear_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!buffer_uptodate(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) zero_user(page, bh_offset(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) } while ((bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) } while (++u <= nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static inline void ntfs_flush_dcache_pages(struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) BUG_ON(!nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * Warning: Do not do the decrement in the same statement as the call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * flush_dcache_page() because on i386 it is a no-op macro, hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * decrement would never happen and the loop would never terminate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) --nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) flush_dcache_page(pages[nr_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) } while (nr_pages > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * ntfs_commit_pages_after_non_resident_write - commit the received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * @pages: array of destination pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * @nr_pages: number of pages in @pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * @pos: byte position in file at which the write begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * @bytes: number of bytes to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * See description of ntfs_commit_pages_after_write(), below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static inline int ntfs_commit_pages_after_non_resident_write(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct page **pages, const unsigned nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) s64 pos, size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) s64 end, initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct inode *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ntfs_inode *ni, *base_ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct buffer_head *bh, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ntfs_attr_search_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) MFT_RECORD *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ATTR_RECORD *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) unsigned blocksize, u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) vi = pages[0]->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) ni = NTFS_I(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) blocksize = vi->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) end = pos + bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) s64 bh_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) bool partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) page = pages[u];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) bh_pos = (s64)page->index << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) partial = false;
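/*
 * Mark all buffers that intersect the write uptodate and dirty, and
 * note whether any buffer outside the write is not uptodate, as that
 * prevents marking the whole page uptodate below.
 */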
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) s64 bh_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) bh_end = bh_pos + blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (bh_end <= pos || bh_pos >= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) partial = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * If all buffers are now uptodate but the page is not, set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) * page uptodate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!partial && !PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) } while (++u < nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Finally, if we do not need to update initialized_size or i_size we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * are finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) initialized_size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) read_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (end <= initialized_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * Update initialized_size/i_size as appropriate, both in the inode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * the mft record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (!NInoAttr(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) base_ni = ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) base_ni = ni->ext.base_ntfs_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* Map, pin, and lock the mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) m = map_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (IS_ERR(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) err = PTR_ERR(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) BUG_ON(!NInoNonResident(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) ctx = ntfs_attr_get_search_ctx(base_ni, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (unlikely(!ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) CASE_SENSITIVE, 0, NULL, 0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (err == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) a = ctx->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) BUG_ON(!a->non_resident);
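/*
 * Extend the initialized size to the end of the write and, if the
 * write went past i_size, the data size and i_size as well, keeping
 * the ntfs inode and the attribute record in sync under the size
 * lock.
 */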
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) write_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) BUG_ON(end > ni->allocated_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ni->initialized_size = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) a->data.non_resident.initialized_size = cpu_to_sle64(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (end > i_size_read(vi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) i_size_write(vi, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) a->data.non_resident.data_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) a->data.non_resident.initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) write_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* Mark the mft record dirty, so it gets written back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) "code %i).", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (err != -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) NVolSetErrors(ni->vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * ntfs_commit_pages_after_write - commit the received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * @pages: array of destination pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * @nr_pages: number of pages in @pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * @pos: byte position in file at which the write begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * @bytes: number of bytes to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * locked but not kmap()ped. The source data has already been copied into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * @pages. ntfs_prepare_pages_for_non_resident_write() has been called before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * the data was copied (for non-resident attributes only) and it returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * We need to set uptodate and mark dirty all buffers within the boundary of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * the write. If all buffers in a page are uptodate we set the page uptodate, too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * Setting the buffers dirty ensures that they get written out later when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * ntfs_writepage() is invoked by the VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * Finally, we need to update i_size and initialized_size as appropriate both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * in the inode and the mft record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * This is modelled after fs/buffer.c::generic_commit_write(), which marks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * buffers uptodate and dirty, sets the page uptodate if all buffers in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * page are uptodate, and updates i_size if the end of io is beyond i_size. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * that case, it also marks the inode dirty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * If things have gone as outlined in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes. For resident
 * attributes we bring the page uptodate here and combine that with the copy
 * into the mft record, which saves one atomic kmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * Return 0 on success or -errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int ntfs_commit_pages_after_write(struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) const unsigned nr_pages, s64 pos, size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) s64 end, initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct inode *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ntfs_inode *ni, *base_ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ntfs_attr_search_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) MFT_RECORD *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ATTR_RECORD *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) char *kattr, *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) u32 attr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) BUG_ON(!nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) BUG_ON(!pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) page = pages[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) BUG_ON(!page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) vi = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ni = NTFS_I(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) vi->i_ino, ni->type, page->index, nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) (long long)pos, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (NInoNonResident(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return ntfs_commit_pages_after_non_resident_write(pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) nr_pages, pos, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) BUG_ON(nr_pages > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * Attribute is resident, implying it is not compressed, encrypted, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * sparse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (!NInoAttr(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) base_ni = ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) base_ni = ni->ext.base_ntfs_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) BUG_ON(NInoNonResident(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* Map, pin, and lock the mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) m = map_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (IS_ERR(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) err = PTR_ERR(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) ctx = ntfs_attr_get_search_ctx(base_ni, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (unlikely(!ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) CASE_SENSITIVE, 0, NULL, 0, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (err == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) a = ctx->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) BUG_ON(a->non_resident);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /* The total length of the attribute value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) attr_len = le32_to_cpu(a->data.resident.value_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) i_size = i_size_read(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) BUG_ON(attr_len != i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) BUG_ON(pos > attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) end = pos + bytes;
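	/*
	 * The end of the write must not exceed the space available for the
	 * attribute value inside the mft record (attribute record length
	 * minus the value offset).
	 */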
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) BUG_ON(end > le32_to_cpu(a->length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) le16_to_cpu(a->data.resident.value_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Copy the received data from the page to the mft record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) memcpy(kattr + pos, kaddr + pos, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* Update the attribute length if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (end > attr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) attr_len = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) a->data.resident.value_length = cpu_to_le32(attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * If the page is not uptodate, bring the out of bounds area(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * uptodate by copying data from the mft record to the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (pos > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) memcpy(kaddr, kattr, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (end < attr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) memcpy(kaddr + end, kattr + end, attr_len - end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /* Zero the region outside the end of the attribute value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* Update initialized_size/i_size if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) initialized_size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) BUG_ON(end > ni->allocated_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) read_unlock_irqrestore(&ni->size_lock, flags);
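	/*
	 * For a resident attribute the initialized size is always equal to
	 * the data size (i_size), which the BUG_ON below asserts.
	 */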
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) BUG_ON(initialized_size != i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (end > initialized_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) write_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) ni->initialized_size = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) i_size_write(vi, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) write_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /* Mark the mft record dirty, so it gets written back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) flush_dcache_mft_record_page(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) mark_mft_record_dirty(ctx->ntfs_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) ntfs_warning(vi->i_sb, "Error allocating memory required to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) "commit the write.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) ntfs_warning(vi->i_sb, "Page is uptodate, setting "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) "dirty so the write will be retried "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) "later on by the VM.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * Put the page on mapping->dirty_pages, but leave its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * buffers' dirty state as-is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) ntfs_error(vi->i_sb, "Page is not uptodate. Written "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) "data has been lost.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ntfs_error(vi->i_sb, "Resident attribute commit write failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) "with error %i.", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) NVolSetErrors(ni->vol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) ntfs_attr_put_search_ctx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) unmap_mft_record(base_ni);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Copy as much as we can into the pages and return the number of bytes which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * were successfully copied. If a fault is encountered then clear the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * out to (ofs + bytes) and return the number of bytes which were copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) unsigned ofs, struct iov_iter *i, size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct page **last_page = pages + nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) size_t total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct iov_iter data = *i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) unsigned len, copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) do {
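		/* Copy at most up to the end of the current page. */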
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) len = PAGE_SIZE - ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (len > bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) len = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) total += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) bytes -= copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) iov_iter_advance(&data, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (copied < len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) } while (++pages < last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* Zero the rest of the target like __copy_from_user(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) len = PAGE_SIZE - copied;
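	/*
	 * The first page is zeroed from the point where the copy faulted;
	 * every following page is zeroed from its start.
	 */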
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (len > bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) len = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) zero_user(*pages, copied, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) bytes -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) } while (++pages < last_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * ntfs_perform_write - perform buffered write to a file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * @file: file to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * @i: iov_iter with data to write
 * @pos: byte position in the file at which to begin writing
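 *
 * Returns the number of bytes written, or a negative error code if nothing
 * was written.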
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct address_space *mapping = file->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct inode *vi = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) ntfs_inode *ni = NTFS_I(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) ntfs_volume *vol = ni->vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct page *cached_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) VCN last_vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) LCN lcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ssize_t status, written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) unsigned nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) "0x%llx, count 0x%lx.", vi->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) (unsigned)le32_to_cpu(ni->type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) (unsigned long long)pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) (unsigned long)iov_iter_count(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * If a previous ntfs_truncate() failed, repeat it and abort if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) * fails again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (unlikely(NInoTruncateFailed(ni))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) inode_dio_wait(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) err = ntfs_truncate(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (err || NInoTruncateFailed(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ntfs_error(vol->sb, "Cannot perform write to inode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) "0x%lx, attribute type 0x%x, because "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) "ntfs_truncate() failed (error code "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) "%i).", vi->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) (unsigned)le32_to_cpu(ni->type), err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * Determine the number of pages per cluster for non-resident
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) nr_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) nr_pages = vol->cluster_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) last_vcn = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) VCN vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) pgoff_t idx, start_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) unsigned ofs, do_pages, u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) size_t copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
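		/*
		 * By default operate on the single page containing @pos,
		 * writing from @ofs to the end of that page.
		 */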
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) start_idx = idx = pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) ofs = pos & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) bytes = PAGE_SIZE - ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) do_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (nr_pages > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) vcn = pos >> vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (vcn != last_vcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) last_vcn = vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * Get the lcn of the vcn the write is in. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * it is a hole, need to lock down all pages in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) down_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) vol->cluster_size_bits, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (unlikely(lcn < LCN_HOLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (lcn == LCN_ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ntfs_error(vol->sb, "Cannot "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) "perform write to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) "inode 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) "attribute type 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) "because the attribute "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) "is corrupt.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) vi->i_ino, (unsigned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) le32_to_cpu(ni->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
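				/*
				 * The write falls inside a hole, so widen the
				 * operation to cover the whole cluster: all of
				 * its pages get locked and prepared so the
				 * hole can be filled consistently.
				 */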
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (lcn == LCN_HOLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) start_idx = (pos & ~(s64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) vol->cluster_size_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) bytes = vol->cluster_size - (pos &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) vol->cluster_size_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) do_pages = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (bytes > iov_iter_count(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) bytes = iov_iter_count(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * Bring in the user page(s) that we will copy from _first_.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * Otherwise there is a nasty deadlock on copying from the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * page(s) as we are writing to, without it/them being marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * up-to-date. Note, at present there is nothing to stop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * pages being swapped out between us bringing them into memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * and doing the actual copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) status = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /* Get and lock @do_pages starting at index @start_idx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) pages, &cached_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (unlikely(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped. We also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * need to bring uptodate any buffers that are only partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * being written to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (NInoNonResident(ni)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) status = ntfs_prepare_pages_for_non_resident_write(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) pages, do_pages, pos, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (unlikely(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) unlock_page(pages[--do_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) put_page(pages[do_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) } while (do_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
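		/*
		 * Index within @pages of the page containing @pos; even when
		 * a hole widened the locked range to a whole cluster, copying
		 * starts at the page that contains @pos.
		 */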
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) u = (pos >> PAGE_SHIFT) - pages[0]->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) i, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) ntfs_flush_dcache_pages(pages + u, do_pages - u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (likely(copied == bytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) status = ntfs_commit_pages_after_write(pages, do_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) pos, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) status = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) unlock_page(pages[--do_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) put_page(pages[do_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) } while (do_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (unlikely(status < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) copied = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (unlikely(!copied)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) size_t sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * We failed to copy anything. Fall back to single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * segment length write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * This is needed to avoid possible livelock in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * case that all segments in the iov cannot be copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * at once without a pagefault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) sc = iov_iter_single_seg_count(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (bytes > sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) bytes = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) iov_iter_advance(i, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) pos += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) written += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) balance_dirty_pages_ratelimited(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) status = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) } while (iov_iter_count(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (cached_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) put_page(cached_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) written ? "written" : "status", (unsigned long)written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) (long)status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return written ? written : status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /**
 * ntfs_file_write_iter - write data to an ntfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * @iocb: IO state structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * @from: iov_iter with data to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) *
 * Basically the same as generic_file_write_iter() except that it ends up
 * calling ntfs_perform_write() instead of generic_perform_write() and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * O_DIRECT is not implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct inode *vi = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ssize_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ssize_t err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) inode_lock(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* We can write back this queue in page reclaim. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) current->backing_dev_info = inode_to_bdi(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) err = ntfs_prepare_file_for_write(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (iov_iter_count(from) && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) written = ntfs_perform_write(file, from, iocb->ki_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) current->backing_dev_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) inode_unlock(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) iocb->ki_pos += written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (likely(written > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) written = generic_write_sync(iocb, written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return written ? written : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * ntfs_file_fsync - sync a file to disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * @filp: file to be synced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * @datasync: if non-zero only flush user data and not metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * system calls. This function is inspired by fs/buffer.c::file_fsync().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * If @datasync is false, write the mft record and all associated extent mft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * records as well as the $DATA attribute and then sync the block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * If @datasync is true and the attribute is non-resident, we skip the writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * of the mft record and all associated extent mft records (this might still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * happen due to the write_inode_now() call).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * Also, if @datasync is true, we do not wait on the inode to be written out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * but we always wait on the page cache pages to be written out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) *
 * Locking: The function takes i_mutex on the inode itself; callers must not
 * hold it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * TODO: We should probably also write all attribute/index inodes associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * with this inode but since we have no simple way of getting to them we ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * this problem for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int datasync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct inode *vi = filp->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) err = file_write_and_wait_range(filp, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) inode_lock(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) BUG_ON(S_ISDIR(vi->i_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (!datasync || !NInoNonResident(NTFS_I(vi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) ret = __ntfs_write_inode(vi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) write_inode_now(vi, !datasync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * NOTE: If we were to use mapping->private_list (see ext2 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * fs/buffer.c) for dirty blocks then we could optimize the below to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * sync_mapping_buffers(vi->i_mapping).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) err = sync_blockdev(vi->i_sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (unlikely(err && !ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (likely(!ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ntfs_debug("Done.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) "%u.", datasync ? "data" : "", vi->i_ino, -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) inode_unlock(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) const struct file_operations ntfs_file_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) .read_iter = generic_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) .write_iter = ntfs_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) .fsync = ntfs_file_fsync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) .mmap = generic_file_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) .open = ntfs_file_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) const struct inode_operations ntfs_file_inode_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) #ifdef NTFS_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) .setattr = ntfs_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) #endif /* NTFS_RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) const struct file_operations ntfs_empty_file_ops = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) const struct inode_operations ntfs_empty_inode_ops = {};