^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* -*- mode: c; c-basic-offset: 8; -*-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * vim: noexpandtab sw=8 ts=8 sts=0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * file.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * File open, close, extend, truncate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 2002, 2004 Oracle. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/splice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/falloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/quotaops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <cluster/masklog.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include "ocfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "alloc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "aops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "dir.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "dlmglue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "extent_map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "file.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "sysfile.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "ioctl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "journal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include "locks.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "mmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include "suballoc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include "super.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include "xattr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include "acl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include "quota.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "refcounttree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "ocfs2_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "buffer_head_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static int ocfs2_init_file_private(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct ocfs2_file_private *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) if (!fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) fp->fp_file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) mutex_init(&fp->fp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) ocfs2_file_lock_res_init(&fp->fp_flock, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) file->private_data = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static void ocfs2_free_file_private(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct ocfs2_file_private *fp = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) ocfs2_lock_res_free(&fp->fp_flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) kfree(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) file->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
/*
 * Per-open setup: refuse opens of inodes already wiped by another
 * cluster node, account the open (and any O_DIRECT use) under
 * ip_lock, and attach the per-file private data used for
 * cluster-aware flock.
 */
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		/* Writers can touch quota; make sure it is initialized
		 * before any allocation happens. */
		status = dquot_initialize(inode);
		if (status)
			goto leave;
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

	/* Advertise IOCB_NOWAIT support regardless of the outcome above;
	 * on failure the file is torn down by the VFS anyway. */
	file->f_mode |= FMODE_NOWAIT;

leave:
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
/*
 * Undo ocfs2_file_open(): drop the open count under ip_lock (clearing
 * the O_DIRECT hint on the last close) and free the per-file private
 * data.  Always returns 0.
 */
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	/* Trace while still holding ip_lock so ip_open_count is stable. */
	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
/* Directories only need the per-file private data (for flock). */
static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
/* Counterpart to ocfs2_dir_open(); always succeeds. */
static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
/*
 * fsync/fdatasync: flush dirty pages in [start, end], then make sure
 * the transaction covering this inode's metadata has committed.  If
 * the journal would not have issued a cache flush for that commit, we
 * issue one ourselves so data reaches stable storage.
 */
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Write out and wait on the data pages first. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* fdatasync only needs the transaction that touched data-relevant
	 * metadata; fsync needs the last transaction touching the inode. */
	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		/* The commit won't flush the device cache; do it here. */
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) int ocfs2_should_update_atime(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct vfsmount *vfsmnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) struct timespec64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if ((inode->i_flags & S_NOATIME) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * We can be called with no vfsmnt structure - NFSD will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * sometimes do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * Note that our action here is different than touch_atime() -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * if we can't tell whether this is a noatime mount, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * don't know whether to trust the value of s_atime_quantum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) if (vfsmnt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (vfsmnt->mnt_flags & MNT_RELATIME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) now = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
/*
 * Write the current time into both the in-core inode atime and the
 * on-disk dinode, in its own small transaction.
 *
 * @inode: inode whose atime is updated
 * @bh:    buffer head holding the inode's dinode block
 *
 * Returns 0 on success or a negative error from the journal layer.
 */
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) int ocfs2_set_inode_size(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) struct buffer_head *fe_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) u64 new_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) i_size_write(inode, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) inode->i_ctime = inode->i_mtime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
/*
 * Convenience wrapper around ocfs2_set_inode_size() that starts and
 * commits its own one-inode transaction.
 *
 * Returns 0 on success or a negative error.  Note the handle is
 * committed even when the size update fails - the journal layer
 * handles the aborted credits.
 */
int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) static int ocfs2_cow_file_pos(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct buffer_head *fe_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) u64 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) unsigned int num_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) unsigned int ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * If the new offset is aligned to the range of the cluster, there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * no space for ocfs2_zero_range_for_truncate to fill, so no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * CoW either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) status = ocfs2_get_clusters(inode, cpos, &phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
/*
 * First phase of a shrinking truncate: CoW the boundary cluster if
 * needed, zero the range from new_i_size to the end of its cluster,
 * and record the new size in both the in-core inode and the dinode -
 * all inside one transaction.  The actual extent removal happens
 * later in ocfs2_commit_truncate().
 */
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster contains the offset if it is reflinked
	 * since we will call ocfs2_zero_range_for_truncate later which will
	 * write "0" from offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	/* Mirror the new size and timestamps into the on-disk dinode. */
	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) int ocfs2_truncate_file(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) u64 new_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) struct ocfs2_dinode *fe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) /* We trust di_bh because it comes from ocfs2_inode_lock(), which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * already validated it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) fe = (struct ocfs2_dinode *) di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) (unsigned long long)le64_to_cpu(fe->i_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) (unsigned long long)new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) "Inode %llu, inode i_size = %lld != di "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) "i_size = %llu, i_flags = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) i_size_read(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) (unsigned long long)le64_to_cpu(fe->i_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) le32_to_cpu(fe->i_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (new_i_size > le64_to_cpu(fe->i_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) trace_ocfs2_truncate_file_error(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) (unsigned long long)le64_to_cpu(fe->i_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) (unsigned long long)new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) ocfs2_resv_discard(&osb->osb_la_resmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) &OCFS2_I(inode)->ip_la_data_resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * The inode lock forced other nodes to sync and drop their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * pages, which (correctly) happens even if we have a truncate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * without allocation change - ocfs2 cluster sizes can be much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * greater than page size, so we have to truncate them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) unmap_mapping_range(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) new_i_size + PAGE_SIZE - 1, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) truncate_inode_pages(inode->i_mapping, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) i_size_read(inode), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) goto bail_unlock_sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /* alright, we're going to need to do a full blown alloc size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * change. Orphan the inode so that recovery can complete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * truncate if necessary. This does the task of marking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * i_size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) goto bail_unlock_sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) truncate_inode_pages(inode->i_mapping, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) status = ocfs2_commit_truncate(osb, inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) goto bail_unlock_sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) /* TODO: orphan dir cleanup here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) bail_unlock_sem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) if (!status && OCFS2_I(inode)->ip_clusters == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) status = ocfs2_try_remove_refcount_tree(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * extend file allocation only here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * we'll update all the disk stuff, and oip->alloc_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * expect stuff to be locked, a transaction started and enough data /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * metadata reservations in the contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * Will return -EAGAIN, and a reason if a restart is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * If passed in, *reason will always be set, even in error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) int ocfs2_add_inode_data(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) u32 *logical_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) u32 clusters_to_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) int mark_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct buffer_head *fe_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) struct ocfs2_alloc_context *data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) enum ocfs2_alloc_restarted *reason_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) clusters_to_add, mark_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) data_ac, meta_ac, reason_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) u32 clusters_to_add, int mark_unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) int restart_func = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) int credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) u32 prev_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) struct ocfs2_dinode *fe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct ocfs2_alloc_context *data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) enum ocfs2_alloc_restarted why = RESTART_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) int did_quota = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * Unwritten extent only exists for file systems which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * support holes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) status = ocfs2_read_inode_block(inode, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) fe = (struct ocfs2_dinode *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) restart_all:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) &data_ac, &meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) status = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) restarted_transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) trace_ocfs2_extend_allocation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) (unsigned long long)i_size_read(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) le32_to_cpu(fe->i_clusters), clusters_to_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) why, restart_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) status = dquot_alloc_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) did_quota = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /* reserve a write to the file entry early on - that we if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * run out of credits in the allocation path, we can still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * update i_size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) prev_clusters = OCFS2_I(inode)->ip_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) status = ocfs2_add_inode_data(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) &logical_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) clusters_to_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) mark_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) &why);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if ((status < 0) && (status != -EAGAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (status != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) spin_lock(&OCFS2_I(inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) spin_unlock(&OCFS2_I(inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /* Release unused quota reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) dquot_free_space(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) did_quota = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (why != RESTART_NONE && clusters_to_add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (why == RESTART_META) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) restart_func = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) BUG_ON(why != RESTART_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) status = ocfs2_allocate_extend_trans(handle, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /* handle still has to be committed at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * this point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) goto restarted_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) le32_to_cpu(fe->i_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) (unsigned long long)le64_to_cpu(fe->i_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) OCFS2_I(inode)->ip_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) (unsigned long long)i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) leave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (status < 0 && did_quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) dquot_free_space(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (data_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) ocfs2_free_alloc_context(data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if ((!status) && restart_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) restart_func = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) goto restart_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * While a write will already be ordering the data, a truncate will not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * Thus, we need to explicitly order the zeroed pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) loff_t start_byte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) loff_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (!ocfs2_should_order_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) handle = ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Some parts of this taken from generic_cont_expand, which turned out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * to be too fragile to do exactly what we need without us having to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * worry about recursive locking in ->write_begin() and ->write_end(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) u64 abs_to, struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) unsigned long index = abs_from >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) unsigned zero_from, zero_to, block_start, block_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) BUG_ON(abs_from >= abs_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) BUG_ON(abs_from & (inode->i_blkbits - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) abs_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) abs_to - abs_from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) page = find_or_create_page(mapping, index, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) goto out_commit_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* Get the offsets within the page that we want to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) zero_from = abs_from & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) zero_to = abs_to & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!zero_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) zero_to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) trace_ocfs2_write_zero_page(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) (unsigned long long)abs_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) (unsigned long long)abs_to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) index, zero_from, zero_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* We know that zero_from is block aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) for (block_start = zero_from; block_start < zero_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) block_start = block_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) block_end = block_start + i_blocksize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * block_start is block-aligned. Bump it by one to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * __block_write_begin and block_commit_write to zero the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * whole block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ret = __block_write_begin(page, block_start + 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ocfs2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* must not update i_size! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret = block_commit_write(page, block_start + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) block_start + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * fs-writeback will release the dirty pages without page lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * whose offset are over inode size, the release happens at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * block_write_full_page().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) i_size_write(inode, abs_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) di->i_size = cpu_to_le64((u64)i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) di->i_mtime_nsec = di->i_ctime_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) out_commit_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Find the next range to zero. We do this in terms of bytes because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * that's what ocfs2_zero_extend() wants, and it is dealing with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * pagecache. We may return multiple extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * needs to be zeroed. range_start and range_end return the next zeroing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * range. A subsequent call should pass the previous range_end as its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * zero_start. If range_end is 0, there's nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Unwritten extents are skipped over. Refcounted extents are CoWd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static int ocfs2_zero_extend_get_range(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) u64 zero_start, u64 zero_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u64 *range_start, u64 *range_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int rc = 0, needs_cow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u32 p_cpos, zero_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 zero_cpos =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) unsigned int num_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) unsigned int ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) while (zero_cpos < last_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) zero_clusters = num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (ext_flags & OCFS2_EXT_REFCOUNTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) needs_cow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) zero_cpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!zero_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *range_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) while ((zero_cpos + zero_clusters) < last_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) &p_cpos, &num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (ext_flags & OCFS2_EXT_REFCOUNTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) needs_cow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) zero_clusters += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if ((zero_cpos + zero_clusters) > last_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) zero_clusters = last_cpos - zero_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (needs_cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) zero_clusters, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) zero_cpos + zero_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * has made sure that the entire range needs zeroing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u64 range_end, struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u64 next_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) u64 zero_pos = range_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) trace_ocfs2_zero_extend_range(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) (unsigned long long)range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) (unsigned long long)range_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) BUG_ON(range_start >= range_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) while (zero_pos < range_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (next_pos > range_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) next_pos = range_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) zero_pos = next_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * Very large extends have the potential to lock up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * the cpu for extended periods of time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) loff_t zero_to_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) u64 zero_start, range_start = 0, range_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) (unsigned long long)zero_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) (unsigned long long)i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) while (zero_start < zero_to_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) zero_to_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) &range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) &range_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!range_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* Trim the ends */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (range_start < zero_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) range_start = zero_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (range_end > zero_to_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) range_end = zero_to_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ret = ocfs2_zero_extend_range(inode, range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) range_end, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) zero_start = range_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) u64 new_i_size, u64 zero_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) u32 clusters_to_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * Only quota files call this without a bh, and they can't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * refcounted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (clusters_to_add < oi->ip_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) clusters_to_add = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) clusters_to_add -= oi->ip_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (clusters_to_add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) clusters_to_add, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * Call this even if we don't add any clusters to the tree. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * still need to zero the area between the old i_size and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * new i_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ret = ocfs2_zero_extend(inode, di_bh, zero_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int ocfs2_extend_file(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) u64 new_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) BUG_ON(!di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* setattr sometimes calls us like this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (new_i_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (i_size_read(inode) == new_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) BUG_ON(new_i_size < i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * The alloc sem blocks people in read/write from reading our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * allocation until we're done changing it. We depend on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * i_mutex to block other extend/truncate calls while we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * here. We even have to hold it for sparse files because there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * might be some tail zeroing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) down_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * We can optimize small extends by keeping the inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * inline data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) goto out_update_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) out_update_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int status = 0, size_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int inode_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct dquot *transfer_to[MAXQUOTAS] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int qtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct ocfs2_lock_holder oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) trace_ocfs2_setattr(inode, dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dentry->d_name.len, dentry->d_name.name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) attr->ia_valid, attr->ia_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) from_kuid(&init_user_ns, attr->ia_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) from_kgid(&init_user_ns, attr->ia_gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* ensuring we don't even attempt to truncate a symlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (S_ISLNK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) attr->ia_valid &= ~ATTR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) | ATTR_GID | ATTR_UID | ATTR_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) status = setattr_prepare(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (is_quota_modification(inode, attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) status = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (size_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * Here we should wait dio to finish before inode lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * to avoid a deadlock between ocfs2_setattr() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * ocfs2_dio_end_io_write()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) status = ocfs2_rw_lock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (had_lock < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) status = had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) goto bail_unlock_rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) } else if (had_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * As far as we know, ocfs2_setattr() could only be the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * VFS entry point in the call chain of recursive cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * locking issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * For instance:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * chmod_common()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * notify_change()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * ocfs2_setattr()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * posix_acl_chmod()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * ocfs2_iop_get_acl()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * But, we're not 100% sure if it's always true, because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * ordering of the VFS entry points in the call chain is out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * of our control. So, we'd better dump the stack here to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * catch the other cases of recursive locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) mlog(ML_ERROR, "Another case of recursive locking:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) inode_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (size_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) status = inode_newsize_ok(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) goto bail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (i_size_read(inode) >= attr->ia_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (ocfs2_should_order_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) status = ocfs2_begin_ordered_truncate(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto bail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) status = ocfs2_truncate_file(inode, bh, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) status = ocfs2_extend_file(inode, bh, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (status != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) goto bail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Gather pointers to quota structures so that allocation /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * freeing of quota structures happens here and not inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * dquot_transfer() where we have problems with lock ordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (IS_ERR(transfer_to[USRQUOTA])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) status = PTR_ERR(transfer_to[USRQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) transfer_to[USRQUOTA] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) goto bail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (IS_ERR(transfer_to[GRPQUOTA])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) status = PTR_ERR(transfer_to[GRPQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) transfer_to[GRPQUOTA] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto bail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 2 * ocfs2_quota_trans_credits(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) status = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) goto bail_unlock_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) status = __dquot_transfer(inode, transfer_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto bail_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) status = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto bail_unlock_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) setattr_copy(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) status = ocfs2_mark_inode_dirty(handle, inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) bail_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) bail_unlock_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) bail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (status && inode_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) inode_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bail_unlock_rw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (size_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ocfs2_rw_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* Release quota pointers in case we acquired them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dqput(transfer_to[qtype]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!status && attr->ia_valid & ATTR_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) status = ocfs2_acl_chmod(inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (inode_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) int ocfs2_getattr(const struct path *path, struct kstat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) u32 request_mask, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct inode *inode = d_inode(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct super_block *sb = path->dentry->d_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct ocfs2_super *osb = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) err = ocfs2_inode_revalidate(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mlog_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) generic_fillattr(inode, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * If there is inline data in the inode, the inode will normally not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * have data blocks allocated (it may have an external xattr block).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * Report at least one sector for such files, so tools like tar, rsync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * others don't incorrectly think the file is completely sparse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) stat->blocks += (stat->size + 511)>>9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* We set the blksize from the cluster size for performance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) stat->blksize = osb->s_clustersize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int ocfs2_permission(struct inode *inode, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) int ret, had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct ocfs2_lock_holder oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (mask & MAY_NOT_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (had_lock < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ret = had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) } else if (had_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* See comments in ocfs2_setattr() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * The call chain of this case could be:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * do_sys_open()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * may_open()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * inode_permission()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * ocfs2_permission()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * ocfs2_iop_get_acl()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) mlog(ML_ERROR, "Another case of recursive locking:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ret = generic_permission(inode, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int __ocfs2_write_remove_suid(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) trace_ocfs2_write_remove_suid(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) goto out_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) inode->i_mode &= ~S_ISUID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) inode->i_mode &= ~S_ISGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) di = (struct ocfs2_dinode *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) di->i_mode = cpu_to_le16(inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ocfs2_update_inode_fsync_trans(handle, inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) out_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static int ocfs2_write_remove_suid(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ret = ocfs2_read_inode_block(inode, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ret = __ocfs2_write_remove_suid(inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Allocate enough extents to cover the region starting at byte offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * start for len bytes. Existing extents are skipped, any extents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * added are marked as "unwritten".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static int ocfs2_allocate_unwritten_extents(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) u32 cpos, phys_cpos, clusters, alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) u64 end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) ret = ocfs2_read_inode_block(inode, &di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * Nothing to do if the requested reservation range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * fits within the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ocfs2_size_fits_inline_data(di_bh, end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * We consider both start and len to be inclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) clusters -= cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) while (clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) &alloc_size, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * Hole or existing extent len can be arbitrary, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * cap it to our own allocation request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (alloc_size > clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) alloc_size = clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (phys_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * We already have an allocation at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * region so we can safely skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (ret != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) cpos += alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) clusters -= alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * Truncate a byte range, avoiding pages within partial clusters. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * preserves those pages for the zeroing code to write to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) u64 byte_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) loff_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) end = byte_start + byte_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) end = end & ~(osb->s_clustersize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) unmap_mapping_range(mapping, start, end - start, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) truncate_inode_pages_range(mapping, start, end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * zero out partial blocks of one cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * start: file offset where zero starts, will be made upper block aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * len: it will be trimmed to the end of current cluster if "start + len"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * is bigger than it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static int ocfs2_zeroout_partial_cluster(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) u64 start_block, end_block, nr_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) u64 p_block, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) u32 cluster, p_cluster, nr_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) u64 end = ocfs2_align_bytes_to_clusters(sb, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (start + len < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) start_block = ocfs2_blocks_for_bytes(sb, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) end_block = ocfs2_blocks_for_bytes(sb, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) nr_blocks = end_block - start_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!nr_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cluster = ocfs2_bytes_to_clusters(sb, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) &nr_clusters, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!p_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static int ocfs2_zero_partial_clusters(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) u64 tmpend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) u64 end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) unsigned int csize = osb->s_clustersize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) loff_t isize = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * The "start" and "end" values are NOT necessarily part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * the range whose allocation is being deleted. Rather, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * is what the user passed in with the request. We must zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * partial clusters here. There's no need to worry about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * physical allocation - the zeroing code knows to skip holes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) trace_ocfs2_zero_partial_clusters(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) (unsigned long long)start, (unsigned long long)end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * If both edges are on a cluster boundary then there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * zeroing required as the region is part of the allocation to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * be truncated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /* No page cache for EOF blocks, issue zero out to disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (end > isize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * zeroout eof blocks in last cluster starting from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * "isize" even "start" > "isize" because it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * complicated to zeroout just at "start" as "start"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * may be not aligned with block size, buffer write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * would be required to do that, but out of eof buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * write is not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) ret = ocfs2_zeroout_partial_cluster(inode, isize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) end - isize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (start >= isize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) end = isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * If start is on a cluster boundary and end is somewhere in another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * cluster, we have not COWed the cluster starting at start, unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * end is also within the same cluster. So, in this case, we skip this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * first call to ocfs2_zero_range_for_truncate() truncate and move on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * to the next one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if ((start & (csize - 1)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * We want to get the byte offset of the end of the 1st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) tmpend = (u64)osb->s_clustersize +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) (start & ~(osb->s_clustersize - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (tmpend > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) tmpend = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) trace_ocfs2_zero_partial_clusters_range1(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) (unsigned long long)start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) (unsigned long long)tmpend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) ret = ocfs2_zero_range_for_truncate(inode, handle, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) tmpend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (tmpend < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * This may make start and end equal, but the zeroing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * code will skip any work in that case so there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * need to catch it up here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) start = end & ~(osb->s_clustersize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) trace_ocfs2_zero_partial_clusters_range2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) (unsigned long long)start, (unsigned long long)end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct ocfs2_extent_rec *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (le32_to_cpu(rec->e_cpos) < pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * Helper to calculate the punching pos and length in one run, we handle the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * following three cases in order:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * - remove the entire record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * - remove a partial record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * - no record needs to be removed (hole-punching completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static void ocfs2_calc_trunc_pos(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct ocfs2_extent_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) u32 trunc_start, u32 *trunc_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) u32 *trunc_len, u32 *trunc_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) u64 *blkno, int *done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) u32 coff, range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * remove an entire extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) *trunc_cpos = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * Skip holes if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (range < *trunc_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) *trunc_end = range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) *blkno = le64_to_cpu(rec->e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) *trunc_end = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) } else if (range > trunc_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * remove a partial extent record, which means we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * removing the last extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) *trunc_cpos = trunc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * skip hole if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (range < *trunc_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) *trunc_end = range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) *trunc_len = *trunc_end - trunc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) coff = trunc_start - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) *blkno = le64_to_cpu(rec->e_blkno) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) ocfs2_clusters_to_blocks(inode->i_sb, coff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) *trunc_end = trunc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * It may have two following possibilities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * - last record has been removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * - trunc_start was within a hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * both two cases mean the completion of hole punching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) *done = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int ocfs2_remove_inode_range(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct buffer_head *di_bh, u64 byte_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) u64 byte_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) int ret = 0, flags = 0, done = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) u32 cluster_in_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct ocfs2_extent_list *el = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct ocfs2_extent_rec *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) trace_ocfs2_remove_inode_range(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) (unsigned long long)byte_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) (unsigned long long)byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (byte_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) byte_start + byte_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * There's no need to get fancy with the page cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * truncate of an inline-data inode. We're talking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * about less than a page here, which will be cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * in the dinode buffer anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) unmap_mapping_range(mapping, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) truncate_inode_pages(mapping, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * For reflinks, we may need to CoW 2 clusters which might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * partially zero'd later, if hole's start and end offset were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) * within one cluster(means is not exactly aligned to clustersize).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (ocfs2_is_refcount_inode(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) cluster_in_el = trunc_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) path = ocfs2_new_path_from_et(&et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) while (trunc_end > trunc_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) ret = ocfs2_find_path(INODE_CACHE(inode), path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) cluster_in_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) i = ocfs2_find_rec(el, trunc_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * Need to go to previous extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (i < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (path->p_tree_depth == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) &cluster_in_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * We've reached the leftmost extent block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * it's safe to leave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (cluster_in_el == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * The 'pos' searched for previous extent block is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * always one cluster less than actual trunc_end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) trunc_end = cluster_in_el + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) &trunc_len, &trunc_end, &blkno, &done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) phys_cpos, trunc_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) &dealloc, refcount_loc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) cluster_in_el = trunc_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * Parts of this function taken from xfs_change_file_space()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) loff_t f_pos, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct ocfs2_space_resv *sr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) int change_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) s64 llen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) loff_t size, orig_isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) unsigned long long max_off = inode->i_sb->s_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * This prevents concurrent writes on other nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ret = ocfs2_rw_lock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) ret = ocfs2_inode_lock(inode, &di_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto out_rw_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) switch (sr->l_whence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) case 0: /*SEEK_SET*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) case 1: /*SEEK_CUR*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) sr->l_start += f_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) case 2: /*SEEK_END*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) sr->l_start += i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) sr->l_whence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (sr->l_start < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) || sr->l_start > max_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) || (sr->l_start + llen) < 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) || (sr->l_start + llen) > max_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) size = sr->l_start + sr->l_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (sr->l_len <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (file && should_remove_suid(file->f_path.dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) ret = __ocfs2_write_remove_suid(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) case OCFS2_IOC_RESVSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) case OCFS2_IOC_RESVSP64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * This takes unsigned offsets, but the signed ones we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * pass have been checked against overflow above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) sr->l_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) case OCFS2_IOC_UNRESVSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) case OCFS2_IOC_UNRESVSP64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) sr->l_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) orig_isize = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /* zeroout eof blocks in the cluster. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (!ret && change_size && orig_isize < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) size - orig_isize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) i_size_write(inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * We update c/mtime for these changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) goto out_inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) inode->i_ctime = inode->i_mtime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (file && (file->f_flags & O_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) handle->h_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) out_inode_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) out_rw_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ocfs2_rw_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) int ocfs2_change_file_space(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct ocfs2_space_resv *sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) !ocfs2_writes_unwritten_extents(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) !ocfs2_sparse_alloc(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (!(file->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) ret = mnt_want_write_file(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) mnt_drop_write_file(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct ocfs2_space_resv sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) int change_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) int cmd = OCFS2_IOC_RESVSP64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (!ocfs2_writes_unwritten_extents(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (mode & FALLOC_FL_KEEP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) change_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (mode & FALLOC_FL_PUNCH_HOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) cmd = OCFS2_IOC_UNRESVSP64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) sr.l_whence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) sr.l_start = (s64)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) sr.l_len = (s64)len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) change_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) unsigned int extent_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) u32 cpos, clusters, extent_len, phys_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) !ocfs2_is_refcount_inode(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) while (clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) &extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (extent_len > clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) extent_len = clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) clusters -= extent_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) cpos += extent_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) int blockmask = inode->i_sb->s_blocksize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) loff_t final_size = pos + count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if ((pos & blockmask) || (final_size & blockmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct buffer_head **di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) int write_sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) ret = ocfs2_inode_lock(inode, di_bh, meta_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (write_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) down_read(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (write_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) brelse(*di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ocfs2_inode_unlock(inode, meta_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) struct buffer_head **di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) int meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) int write_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (write_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) up_read(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) brelse(*di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (meta_level >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ocfs2_inode_unlock(inode, meta_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) static int ocfs2_prepare_inode_for_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) loff_t pos, size_t count, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int ret = 0, meta_level = 0, overwrite_io = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) int write_sem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct dentry *dentry = file->f_path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) u32 clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * We start with a read level meta lock and only jump to an ex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * if we need to make modifications here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) for(;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ret = ocfs2_inode_lock_for_extent_tree(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) &di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) write_sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * Check if IO will overwrite allocated blocks in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * IOCB_NOWAIT flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (!wait && !overwrite_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) overwrite_io = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /* Clear suid / sgid if necessary. We do this here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) * instead of later in the write path because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) * remove_suid() calls ->setattr without any hint that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * we may have already done our cluster locking. Since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * ocfs2_setattr() *must* take cluster locks to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * proceed, this will lead us to recursively lock the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * inode. There's also the dinode i_size state which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * can be lost via setattr during extending writes (we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * set inode->i_size at the end of a write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (should_remove_suid(dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (meta_level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ocfs2_inode_unlock_for_extent_tree(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) &di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) write_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) meta_level = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) ret = ocfs2_write_remove_suid(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) ret = ocfs2_check_range_for_refcount(inode, pos, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) ocfs2_inode_unlock_for_extent_tree(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) &di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) write_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) meta_level = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) write_sem = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) ret = ocfs2_inode_lock_for_extent_tree(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) &di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) write_sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) clusters =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) pos, count, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) ocfs2_inode_unlock_for_extent_tree(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) &di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) meta_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) write_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) int rw_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) ssize_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) size_t count = iov_iter_count(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) int full_coherency = !(osb->s_mount_opt &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) OCFS2_MOUNT_COHERENCY_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) void *saved_ki_complete = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) int append_write = ((iocb->ki_pos + count) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) i_size_read(inode) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) file->f_path.dentry->d_name.len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) file->f_path.dentry->d_name.name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) (unsigned int)from->nr_segs); /* GRRRRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (!direct_io && nowait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (nowait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) if (!inode_trylock(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * Concurrent O_DIRECT writes are allowed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * mount_option "coherency=buffered".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * For append write, we must take rw EX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) rw_level = (!direct_io || full_coherency || append_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (nowait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) ret = ocfs2_try_rw_lock(inode, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) ret = ocfs2_rw_lock(inode, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) * O_DIRECT writes with "coherency=full" need to take EX cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) * inode_lock to guarantee coherency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (direct_io && full_coherency) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) * We need to take and drop the inode lock to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * other nodes to drop their caches. Buffered I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * already does this in write_begin().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (nowait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) ret = ocfs2_try_inode_lock(inode, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) ret = ocfs2_inode_lock(inode, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) ret = generic_write_checks(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) count = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (direct_io && !is_sync_kiocb(iocb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * Make it a sync io if it's an unaligned aio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) saved_ki_complete = xchg(&iocb->ki_complete, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) /* communicate with ocfs2_dio_end_io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) ocfs2_iocb_set_rw_locked(iocb, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) written = __generic_file_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) /* buffered aio wouldn't have proper lock coverage today */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) BUG_ON(written == -EIOCBQUEUED && !direct_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * function pointer which is called when o_direct io completes so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * it can unlock our rw lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * Unfortunately there are error cases which call end_io and others
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) * that don't. so we don't have to unlock the rw_lock if either an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * async dio is going to do it in the future or an end_io after an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * error has already done it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) rw_level = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (unlikely(written <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (((file->f_flags & O_DSYNC) && !direct_io) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) IS_SYNC(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) ret = filemap_fdatawrite_range(file->f_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) iocb->ki_pos - written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) iocb->ki_pos - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) written = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) ret = jbd2_journal_force_commit(osb->journal->j_journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) written = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) ret = filemap_fdatawait_range(file->f_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) iocb->ki_pos - written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) iocb->ki_pos - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (saved_ki_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) xchg(&iocb->ki_complete, saved_ki_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (rw_level != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) ocfs2_rw_unlock(inode, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) ret = written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) int ret = 0, rw_level = -1, lock_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) struct file *filp = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) filp->f_path.dentry->d_name.len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) filp->f_path.dentry->d_name.name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) to->nr_segs); /* GRRRRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (!inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (!direct_io && nowait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * buffered reads protect themselves in ->readpage(). O_DIRECT reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) * need locks to protect pending reads from racing with truncate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (direct_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (nowait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) ret = ocfs2_try_rw_lock(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) ret = ocfs2_rw_lock(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) rw_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) /* communicate with ocfs2_dio_end_io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) ocfs2_iocb_set_rw_locked(iocb, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * We're fine letting folks race truncates and extending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * writes with read across the cluster, just like they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) * locally. Hence no rw_lock during read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) * Take and drop the meta data lock to update inode fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) * like i_size. This allows the checks down below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) * generic_file_read_iter() a chance of actually working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) !nowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) ocfs2_inode_unlock(inode, lock_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) ret = generic_file_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) trace_generic_file_read_iter_ret(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /* buffered aio wouldn't have proper lock coverage today */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) BUG_ON(ret == -EIOCBQUEUED && !direct_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /* see ocfs2_file_write_iter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) rw_level = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (rw_level != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) ocfs2_rw_unlock(inode, rw_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /* Refer generic_file_llseek_unlocked() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) switch (whence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) case SEEK_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) case SEEK_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) /* SEEK_END requires the OCFS2 inode lock for the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) * because it references the file's size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) ret = ocfs2_inode_lock(inode, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) offset += i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) ocfs2_inode_unlock(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) case SEEK_CUR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (offset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) offset = file->f_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) offset += file->f_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) case SEEK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) case SEEK_HOLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) struct file *file_out, loff_t pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) loff_t len, unsigned int remap_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct inode *inode_in = file_inode(file_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct inode *inode_out = file_inode(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct buffer_head *in_bh = NULL, *out_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) bool same_inode = (inode_in == inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) loff_t remapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (!ocfs2_refcount_tree(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /* Lock both files against IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) /* Check file eligibility and prepare for block sharing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) &len, remap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (ret < 0 || len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) /* Lock out changes to the allocation maps and remap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (!same_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /* Zap any page cache for the destination file's range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) truncate_inode_pages_range(&inode_out->i_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) round_down(pos_out, PAGE_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) round_up(pos_out + len, PAGE_SIZE) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) inode_out, out_bh, pos_out, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (!same_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (remapped < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) ret = remapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * Empty the extent map so that we may get the right extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * record from the disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) ocfs2_extent_map_trunc(inode_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ocfs2_extent_map_trunc(inode_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) return remapped > 0 ? remapped : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) const struct inode_operations ocfs2_file_iops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) .setattr = ocfs2_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) .getattr = ocfs2_getattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) .permission = ocfs2_permission,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) .listxattr = ocfs2_listxattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) .fiemap = ocfs2_fiemap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) .get_acl = ocfs2_iop_get_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) .set_acl = ocfs2_iop_set_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) const struct inode_operations ocfs2_special_file_iops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) .setattr = ocfs2_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) .getattr = ocfs2_getattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) .permission = ocfs2_permission,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) .get_acl = ocfs2_iop_get_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) .set_acl = ocfs2_iop_set_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) const struct file_operations ocfs2_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) .llseek = ocfs2_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) .mmap = ocfs2_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) .fsync = ocfs2_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) .release = ocfs2_file_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) .open = ocfs2_file_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) .read_iter = ocfs2_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) .write_iter = ocfs2_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) .unlocked_ioctl = ocfs2_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) .compat_ioctl = ocfs2_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) .lock = ocfs2_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) .flock = ocfs2_flock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) .splice_write = iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) .fallocate = ocfs2_fallocate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) .remap_file_range = ocfs2_remap_file_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) const struct file_operations ocfs2_dops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) .read = generic_read_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) .iterate = ocfs2_readdir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) .fsync = ocfs2_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) .release = ocfs2_dir_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) .open = ocfs2_dir_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) .unlocked_ioctl = ocfs2_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) .compat_ioctl = ocfs2_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) .lock = ocfs2_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) .flock = ocfs2_flock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) * POSIX-lockless variants of our file_operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) * These will be used if the underlying cluster stack does not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) * posix file locking, if the user passes the "localflocks" mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * option, or if we have a local-only fs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) * ocfs2_flock is in here because all stacks handle UNIX file locks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) * so we still want it in the case of no stack support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) * plocks. Internally, it will do the right thing when asked to ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) * the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) const struct file_operations ocfs2_fops_no_plocks = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) .llseek = ocfs2_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) .mmap = ocfs2_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) .fsync = ocfs2_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) .release = ocfs2_file_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) .open = ocfs2_file_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) .read_iter = ocfs2_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) .write_iter = ocfs2_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) .unlocked_ioctl = ocfs2_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) .compat_ioctl = ocfs2_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) .flock = ocfs2_flock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) .splice_write = iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) .fallocate = ocfs2_fallocate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) .remap_file_range = ocfs2_remap_file_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) const struct file_operations ocfs2_dops_no_plocks = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) .read = generic_read_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) .iterate = ocfs2_readdir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) .fsync = ocfs2_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) .release = ocfs2_dir_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) .open = ocfs2_dir_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) .unlocked_ioctl = ocfs2_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) .compat_ioctl = ocfs2_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) .flock = ocfs2_flock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) };