// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

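/*
 * Map a logical block of a (non-fast) symlink to its on-disk location.
 * Symlink data is created through the buffer cache rather than the
 * page cache, so for a freshly created inode the data may still sit
 * in a journaled buffer; in that case it is copied into bh_result's
 * page here.
 */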
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

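/* Thin wrapper that takes ip_alloc_sem around ocfs2_get_block(). */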
static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	down_read(&oi->ip_alloc_sem);
	ret = ocfs2_get_block(inode, iblock, bh_result, create);
	up_read(&oi->ip_alloc_sem);

	return ret;
}

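/*
 * Generic get_block callback. Note that it never allocates: it only
 * maps already-allocated extents, and it leaves unwritten extents and
 * (on sparse file systems) holes unmapped so the buffer code zeroes
 * them instead of reading stale data.
 */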
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

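/*
 * Copy an inline-data inode's file data out of the dinode into @page,
 * zeroing the remainder of the page. Returns -EROFS if the on-disk
 * inline-data state is inconsistent.
 */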
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has a bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

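/*
 * ->readpage path for inline-data inodes: read the inode block and
 * fill the page straight from the dinode. The page is unlocked here
 * regardless of outcome.
 */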
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&oi->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static void ocfs2_readahead(struct readahead_control *rac)
{
	int ret;
	struct inode *inode = rac->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
		goto out_unlock;

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_up;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	if (readahead_pos(rac) >= i_size_read(inode))
		goto out_up;

	mpage_readahead(rac, ocfs2_get_block);

out_up:
	up_read(&oi->ip_alloc_sem);
out_unlock:
	ocfs2_inode_unlock(inode, 0);
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(handle_t *handle,
		      struct buffer_head *head,
		      unsigned from,
		      unsigned to,
		      int *partial,
		      int (*fn)(handle_t *handle,
				struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

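/*
 * ->bmap callback. Returns the physical block backing @block, or 0 if
 * the lookup failed or the mapping can't safely be handed out (see
 * the refcounted-inode comment below).
 */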
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on refcounted inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (ocfs2_is_refcount_inode(inode))
		return 0;

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

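/*
 * ->releasepage callback: try to strip the page's buffers so the VM
 * can reclaim it. A page without buffers has nothing to free here.
 */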
static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

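/*
 * Work out the byte range that cluster @cpos covers within its page.
 * This only differs from [0, PAGE_SIZE) when the page size is larger
 * than the cluster size.
 */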
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = i_blocksize(inode);

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			clean_bdev_bh_alias(bh);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user(page, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

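/*
 * A write context never spans more than one cluster's worth of pages:
 * one page when pages are at least as large as clusters, otherwise
 * enough pages to cover the largest supported cluster.
 */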
#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)

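/*
 * One unwritten extent touched by an in-flight write, linked into the
 * write context via ue_node and into the inode via ue_ip_node.
 */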
struct ocfs2_unwritten_extent {
	struct list_head	ue_node;
	struct list_head	ue_ip_node;
	u32			ue_cpos;
	u32			ue_phys;
};

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_clear_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	/* Type of caller. Must be one of buffer, mmap, direct. */
	ocfs2_write_type_t		w_type;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zeroed and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used for the page_mkwrite path, indicating
	 * that w_target_page must not be unlocked in
	 * ocfs2_write_end_nolock.
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;

	struct list_head		w_unwritten_list;
	unsigned int			w_unwritten_count;
};

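/* Unlock and drop the reference on each non-NULL page in @pages. */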
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			put_page(pages[i]);
		}
	}
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		put_page(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static void ocfs2_free_unwritten_list(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) list_for_each_entry_safe(ue, tmp, head, ue_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) list_del(&ue->ue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) list_del(&ue->ue_ip_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) kfree(ue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static void ocfs2_free_write_ctxt(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct ocfs2_write_ctxt *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ocfs2_unlock_pages(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) brelse(wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) kfree(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct ocfs2_super *osb, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) unsigned len, ocfs2_write_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) u32 cend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct ocfs2_write_ctxt *wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
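/*
 * Map the byte range [pos, pos + len) onto the clusters it touches.
 * As an illustrative example (assuming 4K clusters, i.e.
 * s_clustersize_bits == 12): pos = 10000, len = 5000 covers bytes
 * 10000..14999, so w_cpos = 10000 >> 12 = 2, cend = 14999 >> 12 = 3
 * and w_clen = 3 - 2 + 1 = 2 clusters.
 */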
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) wc->w_cpos = pos >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) wc->w_first_new_cpos = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) cend = (pos + len - 1) >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) wc->w_clen = cend - wc->w_cpos + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) get_bh(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) wc->w_di_bh = di_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) wc->w_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
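/*
 * "Large pages" here means the page size exceeds the cluster size,
 * so a single page can span several clusters (e.g. 64K pages over
 * 4K clusters).
 */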
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) wc->w_large_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) wc->w_large_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) INIT_LIST_HEAD(&wc->w_unwritten_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *wcp = wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * If a page has any new buffers, zero them out here, and mark them uptodate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * and dirty so they'll be written out (in order to prevent uninitialised
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * block data from leaking). And clear the new bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) unsigned int block_start, block_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct buffer_head *head, *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) BUG_ON(!PageLocked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!page_has_buffers(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) bh = head = page_buffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) block_start = 0;
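/*
 * Walk the page's circular list of buffer_heads. A buffer covering
 * [block_start, block_end) intersects the write range [from, to)
 * exactly when block_end > from && block_start < to, which is the
 * overlap test used below.
 */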
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) block_end = block_start + bh->b_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (buffer_new(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (block_end > from && block_start < to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) unsigned start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) start = max(from, block_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) end = min(to, block_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) zero_user_segment(page, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) clear_buffer_new(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) block_start = block_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bh = bh->b_this_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) } while (bh != head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * Only called when we have a failure during an allocating write, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * order to write zeros into the newly allocated region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void ocfs2_write_failure(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) loff_t user_pos, unsigned user_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) unsigned from = user_pos & (PAGE_SIZE - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) to = user_pos + user_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct page *tmppage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (wc->w_target_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ocfs2_zero_new_buffers(wc->w_target_page, from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) for (i = 0; i < wc->w_num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) tmppage = wc->w_pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (tmppage && page_has_buffers(tmppage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (ocfs2_should_order_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) user_pos, user_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) block_commit_write(tmppage, from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct page *page, u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) loff_t user_pos, unsigned user_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) unsigned int map_from = 0, map_to = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) unsigned int cluster_start, cluster_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) unsigned int user_data_from = 0, user_data_to = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) &cluster_start, &cluster_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Treat the write as new if a hole/lseek spanned across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * the page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) new = new | ((i_size_read(inode) <= page_offset(page)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) (page_offset(page) <= user_pos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (page == wc->w_target_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) map_from = user_pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) map_to = map_from + user_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ret = ocfs2_map_page_blocks(page, p_blkno, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) cluster_start, cluster_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ret = ocfs2_map_page_blocks(page, p_blkno, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) map_from, map_to, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) user_data_from = map_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) user_data_to = map_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) map_from = cluster_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) map_to = cluster_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * If we haven't allocated the new page yet, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * shouldn't be writing it out without copying user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * data. This is likely a math error from the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) BUG_ON(!new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) map_from = cluster_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) map_to = cluster_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ret = ocfs2_map_page_blocks(page, p_blkno, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) cluster_start, cluster_end, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Parts of newly allocated pages need to be zero'd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Above, we have also rewritten 'to' and 'from' - as far as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * the rest of the function is concerned, the entire cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * range inside of a page needs to be written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * We can skip this if the page is up to date - it's already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * been zero'd from being read in as a hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (new && !PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) cpos, user_data_from, user_data_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * This function will only grab one cluster's worth of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int ocfs2_grab_pages_for_write(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) u32 cpos, loff_t user_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) unsigned user_len, int new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct page *mmap_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) unsigned long start, target_index, end_index, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) loff_t last_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) target_index = user_pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Figure out how many pages we'll be manipulating here. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * a non-allocating write, we just change the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * page. Otherwise, we'll need a whole cluster's worth. If we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * writing past i_size, we only need enough pages to cover the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * last page of the write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * We need the index *past* the last page we could possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * touch. This is the page past the end of the write or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * i_size, whichever is greater.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) last_byte = max(user_pos + user_len, i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) BUG_ON(last_byte < 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
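/*
 * For instance, with 4K pages a write whose last_byte is 8192
 * gives end_index = ((8192 - 1) >> 12) + 1 = 2: page indices 0
 * and 1 are covered and index 2 is the first page past the write.
 */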
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if ((start + wc->w_num_pages) > end_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) wc->w_num_pages = end_index - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) wc->w_num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) start = target_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) for (i = 0; i < wc->w_num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) index = start + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (index >= target_index && index <= end_index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) wc->w_type == OCFS2_WRITE_MMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * ocfs2_page_mkwrite() is a little different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * and wants us to directly use the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * passed in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) lock_page(mmap_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* Exit and let the caller retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (mmap_page->mapping != mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) WARN_ON(mmap_page->mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) unlock_page(mmap_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) get_page(mmap_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) wc->w_pages[i] = mmap_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) wc->w_target_locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) } else if (index >= target_index && index <= end_index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) wc->w_type == OCFS2_WRITE_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Direct write has no mapping page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) wc->w_pages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) wc->w_pages[i] = find_or_create_page(mapping, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!wc->w_pages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) wait_for_stable_page(wc->w_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (index == target_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) wc->w_target_page = wc->w_pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) wc->w_target_locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * Prepare a single cluster of the file for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int ocfs2_write_cluster(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) u32 *phys, unsigned int new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned int clear_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) unsigned int should_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct ocfs2_alloc_context *data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct ocfs2_write_ctxt *wc, u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) loff_t user_pos, unsigned user_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) u64 p_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) u32 tmp_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * This is safe to call with the page locks held - it won't take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * any additional semaphores or cluster locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) tmp_pos = cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) &tmp_pos, 1, !clear_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) wc->w_di_bh, wc->w_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) data_ac, meta_ac, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * This shouldn't happen because we must have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * calculated the correct metadata allocation required. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * internal tree allocation code should know how to increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * transaction credits itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * If need be, we could handle -EAGAIN for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * RESTART_TRANS here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mlog_bug_on_msg(ret == -EAGAIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) "Inode %llu: EAGAIN return during allocation.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) } else if (clear_unwritten) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ret = ocfs2_mark_extent_written(inode, &et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) wc->w_handle, cpos, 1, *phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) meta_ac, &wc->w_dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * The only reason this should fail is due to an inability to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * find the extent added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) "at logical cluster %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) BUG_ON(*phys == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
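/*
 * Translate the cluster into its first disk block. When we aren't
 * zeroing the whole cluster, step forward to the block that actually
 * contains user_pos; (bpc - 1) masks off the block offset within the
 * cluster (bpc, blocks per cluster, is a power of two).
 */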
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!should_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) for (i = 0; i < wc->w_num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int tmpret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* This is the direct io target page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (wc->w_pages[i] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) p_blkno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) wc->w_pages[i], cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) user_pos, user_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) should_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (tmpret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) mlog_errno(tmpret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) ret = tmpret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * We only have cleanup to do in the case of an allocating write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (ret && new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ocfs2_write_failure(inode, wc, user_pos, user_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct ocfs2_alloc_context *data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) loff_t pos, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) loff_t cluster_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) unsigned int local_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct ocfs2_write_cluster_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) for (i = 0; i < wc->w_clen; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) desc = &wc->w_desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * We have to make sure that the total write passed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * doesn't extend past a single cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) local_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) cluster_off = pos & (osb->s_clustersize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if ((cluster_off + local_len) > osb->s_clustersize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) local_len = osb->s_clustersize - cluster_off;
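/*
 * For example, with 4K clusters, pos = 3000 and len = 3000 give
 * cluster_off = 3000 and local_len = 4096 - 3000 = 1096 on the
 * first pass; the remaining 1904 bytes are written on the next
 * iteration once pos and len are adjusted below.
 */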
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = ocfs2_write_cluster(mapping, &desc->c_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) desc->c_new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) desc->c_clear_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) desc->c_needs_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) data_ac, meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) wc, desc->c_cpos, pos, local_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) len -= local_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) pos += local_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * ocfs2_write_end() wants to know which parts of the target page it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * should complete the write on. It's easiest to compute them ahead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * time when a more complete view of the write is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) loff_t pos, unsigned len, int alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct ocfs2_write_cluster_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) wc->w_target_from = pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) wc->w_target_to = wc->w_target_from + len;
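/*
 * For example, with 4K pages, pos = 5000 and len = 100 give
 * w_target_from = 5000 & 4095 = 904 and w_target_to = 1004, the
 * byte range of the write within the target page.
 */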
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (alloc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Allocating write - we may have different boundaries based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * on page size and cluster size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * NOTE: We can no longer compute one value from the other as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * the actual write length and user provided length may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * different.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (wc->w_large_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * We only care about the 1st and last cluster within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * our range and whether they should be zero'd or not. Either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * value may be extended out to the start/end of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * newly allocated cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) desc = &wc->w_desc[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (desc->c_needs_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ocfs2_figure_cluster_boundaries(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) desc->c_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) &wc->w_target_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) desc = &wc->w_desc[wc->w_clen - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (desc->c_needs_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) ocfs2_figure_cluster_boundaries(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) desc->c_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) &wc->w_target_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) wc->w_target_from = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) wc->w_target_to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * Check if this extent is marked UNWRITTEN by direct io. If so, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * not do the zero work, and should not clear UNWRITTEN since it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * cleared by the direct io procedure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * If this is a new extent allocated by direct io, we should mark it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * the ip_unwritten_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static int ocfs2_unwritten_check(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct ocfs2_write_cluster_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (!desc->c_needs_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
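/*
 * Allocate-outside-the-lock pattern: if a new tracking struct is
 * needed we drop ip_lock, kmalloc() it, and retry from the top,
 * since another writer may have inserted an entry for this cluster
 * while the lock was dropped.
 */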
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* No need to zero, no matter whether this is a buffered or direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * write. Whoever is zeroing the cluster does the zeroing and will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * clear the unwritten flag after all of the cluster's io has finished. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (desc->c_cpos == ue->ue_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) BUG_ON(desc->c_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) desc->c_needs_zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) desc->c_clear_unwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (wc->w_type != OCFS2_WRITE_DIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (new == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (new == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /* This direct write will do the zeroing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) new->ue_cpos = desc->c_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) new->ue_phys = desc->c_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) desc->c_clear_unwritten = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) list_add_tail(&new->ue_node, &wc->w_unwritten_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) wc->w_unwritten_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * Populate each single-cluster write descriptor in the write context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * with information about the i/o to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * Returns the number of clusters that will have to be allocated, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * well as a worst case estimate of the number of extent records that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * would have to be created during a write to an unwritten region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static int ocfs2_populate_write_desc(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct ocfs2_write_ctxt *wc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) unsigned int *clusters_to_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) unsigned int *extents_to_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct ocfs2_write_cluster_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) unsigned int num_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) unsigned int ext_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) *clusters_to_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) *extents_to_split = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
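/*
 * ocfs2_get_clusters() returns the extent containing c_cpos, and
 * num_clusters says how many clusters from c_cpos onward share that
 * mapping; decrementing num_clusters at the bottom of the loop lets
 * us reuse one lookup for every cluster in the extent.
 */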
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) for (i = 0; i < wc->w_clen; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) desc = &wc->w_desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) desc->c_cpos = wc->w_cpos + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (num_clusters == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * Need to look up the next extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* We should have already CoWed the refcounted extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * Assume worst case - that we're writing in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * the middle of the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * We can assume that the write proceeds from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * left to right, in which case the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * insert code is smart enough to coalesce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * subsequent splits into the previously created records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (ext_flags & OCFS2_EXT_UNWRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) *extents_to_split = *extents_to_split + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) } else if (phys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * Only increment phys if it doesn't describe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * a hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) phys++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * file that got extended. w_first_new_cpos tells us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * where the newly allocated clusters are so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * zero them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (desc->c_cpos >= wc->w_first_new_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) BUG_ON(phys == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) desc->c_needs_zero = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) desc->c_phys = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (phys == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) desc->c_new = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) desc->c_needs_zero = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) desc->c_clear_unwritten = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) *clusters_to_alloc = *clusters_to_alloc + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (ext_flags & OCFS2_EXT_UNWRITTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) desc->c_clear_unwritten = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) desc->c_needs_zero = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ret = ocfs2_unwritten_check(inode, wc, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) num_clusters--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int ocfs2_write_begin_inline(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct ocfs2_write_ctxt *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) page = find_or_create_page(mapping, 0, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * If we don't set w_num_pages then this page won't get unlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * and freed on cleanup of the write context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) wc->w_pages[0] = wc->w_target_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) wc->w_num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ocfs2_set_inode_data_inline(inode, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) wc->w_handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
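/*
 * Returns 1 if the write has been set up to go inline, 0 if the
 * caller should fall back to the regular extent-based write path,
 * and a negative errno on error.
 */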
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct inode *inode, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned len, struct page *mmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct ocfs2_write_ctxt *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int ret, written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) loff_t end = pos + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct ocfs2_dinode *di = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) len, (unsigned long long)pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * Handle inodes which already have inline data 1st.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (mmap_page == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ocfs2_size_fits_inline_data(wc->w_di_bh, end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) goto do_inline_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * The write won't fit - we have to give this inode an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * inline extent list now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * Check whether the inode can accept inline data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * Check whether the write can fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (mmap_page ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) do_inline_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ret = ocfs2_write_begin_inline(mapping, inode, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * This signals to the caller that the data can be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * inline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) written = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return written ? written : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * This function only does anything for file systems which can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * handle sparse files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * What we want to do here is fill in any hole between the current end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * of allocation and the end of our write. That way the rest of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * write path can treat it as a non-allocating write, which has no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * special case code for sparse/nonsparse files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static int ocfs2_expand_nonsparse_inode(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) loff_t pos, unsigned len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct ocfs2_write_ctxt *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) loff_t newsize = pos + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (newsize <= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
	/* There is no wc if this is called from direct I/O. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) wc->w_first_new_cpos =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (pos > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) ret = ocfs2_zero_extend(inode, di_bh, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) int ocfs2_write_begin_nolock(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) loff_t pos, unsigned len, ocfs2_write_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct page **pagep, void **fsdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct buffer_head *di_bh, struct page *mmap_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct ocfs2_write_ctxt *wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct ocfs2_alloc_context *data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) int try_free = 1, ret1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
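	/*
	 * If we fail with -ENOSPC below, we may flush the truncate log
	 * and retry this whole setup exactly once.
	 */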
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) try_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (ocfs2_supports_inline_data(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) mmap_page, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
	/* Direct I/O changes i_size late, so do not zero the tail here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (type != OCFS2_WRITE_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (ocfs2_sparse_alloc(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = ocfs2_zero_tail(inode, di_bh, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) len, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ret = ocfs2_check_range_for_refcount(inode, pos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) } else if (ret == 1) {
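		/*
		 * A return of 1 means some of the write range is refcounted
		 * (shared). CoW the whole range up front so the write below
		 * only deals with exclusively owned clusters.
		 */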
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) clusters_need = wc->w_clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ret = ocfs2_refcount_cow(inode, di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) wc->w_cpos, wc->w_clen, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) &extents_to_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) clusters_need += clusters_to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) trace_ocfs2_write_begin_nolock(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) (long long)i_size_read(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) le32_to_cpu(di->i_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) pos, len, type, mmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) clusters_to_alloc, extents_to_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * We set w_target_from, w_target_to here so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * ocfs2_write_end() knows which range in the target page to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * write out. An allocation requires that we write the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * cluster range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (clusters_to_alloc || extents_to_split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * XXX: We are stretching the limits of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * ocfs2_lock_allocators(). It greatly over-estimates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * the work to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ret = ocfs2_lock_allocators(inode, &et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) clusters_to_alloc, extents_to_split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) &data_ac, &meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
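		/*
		 * Point the data allocator at this inode's local alloc
		 * reservation so consecutive writes tend to get
		 * contiguous clusters.
		 */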
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (data_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) credits = ocfs2_calc_extend_credits(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) &di->id2.i_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) } else if (type == OCFS2_WRITE_DIRECT)
		/* A direct write need not start a trans if no extents are allocated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * We have to zero sparse allocated clusters, unwritten extent clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * and non-sparse clusters we just extended. For non-sparse writes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * we know zeros will only be needed in the first and/or last cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) wc->w_desc[wc->w_clen - 1].c_needs_zero))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) cluster_of_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) cluster_of_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) wc->w_handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (clusters_to_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) ret = dquot_alloc_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) goto out_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /*
	 * Fill our page array first. That way we've grabbed enough
	 * pages to zero and flush if we error after adding the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) cluster_of_pages, mmap_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (ret && ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto out_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * the target page. In this case, we exit with no error and no target
	 * page. This will trigger the caller, page_mkwrite(), to retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * the operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) BUG_ON(wc->w_target_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto out_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto out_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (data_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) ocfs2_free_alloc_context(data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) success:
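	/*
	 * Hand the write context back through *fsdata; the matching
	 * ocfs2_write_end_nolock() consumes and frees it.
	 */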
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (pagep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) *pagep = wc->w_target_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *fsdata = wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) out_quota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (clusters_to_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) dquot_free_space(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
	 * even in case of errors here such as ENOSPC or ENOMEM. So we need
	 * to unlock the target page manually, to prevent deadlocks when
	 * retrying on ENOSPC or when returning non-VM_FAULT_LOCKED to the
	 * VM code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (wc->w_target_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) unlock_page(mmap_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) ocfs2_free_write_ctxt(inode, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (data_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) ocfs2_free_alloc_context(data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (ret == -ENOSPC && try_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * Try to free some truncate log so that we can have enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * clusters to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) try_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (ret1 == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto try_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (ret1 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) mlog_errno(ret1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) ret = ocfs2_inode_lock(inode, &di_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * Take alloc sem here to prevent concurrent lookups. That way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * the mapping, zeroing and tree manipulation within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * ocfs2_write() will be safe against ->readpage(). This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * should also serve to lock out allocation from a shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * writeable region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) pagep, fsdata, di_bh, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) unsigned len, unsigned *copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct ocfs2_dinode *di,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct ocfs2_write_ctxt *wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (unlikely(*copied < len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (!PageUptodate(wc->w_target_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) *copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) kaddr = kmap_atomic(wc->w_target_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) trace_ocfs2_write_end_inline(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) (unsigned long long)pos, *copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) le16_to_cpu(di->id2.i_data.id_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) le16_to_cpu(di->i_dyn_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) int ocfs2_write_end_nolock(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) loff_t pos, unsigned len, unsigned copied, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) unsigned from, to, start = pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct ocfs2_write_ctxt *wc = fsdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) handle_t *handle = wc->w_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) struct page *tmppage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) BUG_ON(!list_empty(&wc->w_unwritten_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) copied = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) goto out_write_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
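	/*
	 * On a short copy, zero any new buffers in the uncopied tail. If
	 * the target page was not even uptodate, drop the copy entirely
	 * so stale data is never exposed.
	 */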
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (unlikely(copied < len) && wc->w_target_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (!PageUptodate(wc->w_target_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) start+len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (wc->w_target_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) flush_dcache_page(wc->w_target_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
	for (i = 0; i < wc->w_num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) tmppage = wc->w_pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
		/* This is the direct I/O target page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (tmppage == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (tmppage == wc->w_target_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) from = wc->w_target_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) to = wc->w_target_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) BUG_ON(from > PAGE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) to > PAGE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) to < from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * Pages adjacent to the target (if any) imply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * a hole-filling write in which case we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * to flush their entire range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) from = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (page_has_buffers(tmppage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (handle && ocfs2_should_order_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) loff_t start_byte =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ((loff_t)tmppage->index << PAGE_SHIFT) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) loff_t length = to - from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) ocfs2_jbd2_inode_add_write(handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) start_byte, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) block_commit_write(tmppage, from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) out_write_size:
	/* Direct I/O does not update i_size here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (wc->w_type != OCFS2_WRITE_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) pos += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (pos > i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) i_size_write(inode, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) di->i_size = cpu_to_le64((u64)i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) ocfs2_journal_dirty(handle, wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) out:
	/* Unlock the pages before running deallocs, since deallocation needs
	 * to acquire the j_trans_barrier lock. Otherwise we could deadlock:
	 * the journal commit thread holds that lock and asks for the page
	 * lock when flushing data. Unlocking here preserves that order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ocfs2_unlock_pages(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ocfs2_run_deallocs(osb, &wc->w_dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) brelse(wc->w_di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) kfree(wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static int ocfs2_write_end(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct ocfs2_dio_write_ctxt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) struct list_head dw_zero_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) unsigned dw_zero_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) int dw_orphaned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) pid_t dw_writer_pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static struct ocfs2_dio_write_ctxt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct ocfs2_dio_write_ctxt *dwc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
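	/*
	 * The dio core passes the same map_bh to every get_block call of a
	 * single direct I/O, so a context stashed in b_private survives
	 * across calls and reaches end_io as its "private" argument.
	 */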
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (bh->b_private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return bh->b_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (dwc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) INIT_LIST_HEAD(&dwc->dw_zero_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) dwc->dw_zero_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) dwc->dw_orphaned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) dwc->dw_writer_pid = task_pid_nr(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) bh->b_private = dwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) *alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return dwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) static void ocfs2_dio_free_write_ctx(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct ocfs2_dio_write_ctxt *dwc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) kfree(dwc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * TODO: Make this into a generic get_blocks function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * From do_direct_io in direct-io.c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * "So what we do is to permit the ->get_blocks function to populate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * bh.b_size with the size of IO which is permitted at this offset and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * this i_blkbits."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * This function is called directly from get_more_blocks in direct-io.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * called like this: dio->get_blocks(dio->inode, fs_startblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * fs_count, map_bh, dio->rw == WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct buffer_head *bh_result, int create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) struct ocfs2_write_ctxt *wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct ocfs2_write_cluster_desc *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct ocfs2_dio_write_ctxt *dwc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) u64 p_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) loff_t pos = iblock << i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) unsigned len, total_len = bh_result->b_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int ret = 0, first_get_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
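	/* Never map across a cluster boundary: clamp the length to the
	 * remainder of the cluster containing pos. */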
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) len = min(total_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /*
	 * bh_result->b_size is computed in get_more_blocks() from the write's
	 * "pos" and "end". We may need to map the range twice, returning a
	 * different buffer state for each part:
	 * 1. the area within the file size is not marked NEW;
	 * 2. the area beyond the file size is marked NEW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * iblock endblk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * |--------|---------|---------|---------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * |<-------area in file------->|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
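	/* If the request straddles i_size, stop this mapping at the last
	 * block inside the file; the tail is mapped by a later call and
	 * marked new below. */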
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if ((iblock <= endblk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ((iblock + ((len - 1) >> i_blkbits)) > endblk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) len = (endblk - iblock + 1) << i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) mlog(0, "get block of %lu at %llu:%u req %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) inode->i_ino, pos, len, total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /*
 * We may need to change the file size in ocfs2_dio_end_io_write(),
 * or add the inode to the orphan dir. So we cannot take the fast
 * path when the file size will change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (pos + total_len <= i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
		/* This is the fast path for an in-place rewrite. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (buffer_mapped(bh_result) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) !buffer_new(bh_result) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* Clear state set by ocfs2_get_block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) bh_result->b_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (unlikely(dwc == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) !dwc->dw_orphaned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) /*
		 * When we are going to allocate extents beyond the file size,
		 * add the inode to the orphan dir first, so that we can
		 * reclaim that space if the system crashes during the write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) ret = ocfs2_add_inode_to_orphan(osb, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) dwc->dw_orphaned = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) ret = ocfs2_inode_lock(inode, &di_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) down_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (first_get_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (ocfs2_sparse_alloc(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) ret = ocfs2_zero_tail(inode, di_bh, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) total_len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) OCFS2_WRITE_DIRECT, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) (void **)&wc, di_bh, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) desc = &wc->w_desc[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
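	/* Translate the descriptor's physical cluster to a block number,
	 * then add iblock's offset within that cluster. */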
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) BUG_ON(p_blkno == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) map_bh(bh_result, inode->i_sb, p_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) bh_result->b_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (desc->c_needs_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (iblock > endblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) set_buffer_new(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
	/* The end_io handler may sleep, and that must not happen in irq
	 * context, so defer the completion to the dio work queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) set_buffer_defer_completion(bh_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (!list_empty(&wc->w_unwritten_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct ocfs2_unwritten_extent *ue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) ue = list_first_entry(&wc->w_unwritten_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct ocfs2_unwritten_extent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) ue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) BUG_ON(ue->ue_cpos != desc->c_cpos);
		/* The physical address may still be 0; fill it in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ue->ue_phys = desc->c_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) dwc->dw_zero_count += wc->w_unwritten_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
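	/* ocfs2_write_end_nolock() consumes wc: it commits the transaction,
	 * unlocks any pages and frees the context, returning "copied". */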
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) BUG_ON(ret != len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static int ocfs2_dio_end_io_write(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct ocfs2_dio_write_ctxt *dwc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ssize_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) struct ocfs2_unwritten_extent *ue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) struct ocfs2_alloc_context *data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) loff_t end = offset + bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int ret = 0, credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
	/* Here we clear unwritten extents, delete the inode from the orphan
	 * dir, and change i_size. If none of these are needed, we can skip
	 * it all. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (list_empty(&dwc->dw_zero_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) end <= i_size_read(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) !dwc->dw_orphaned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) ret = ocfs2_inode_lock(inode, &di_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) down_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
	/* Remove the inode from the orphan dir before acquiring i_mutex. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (dwc->dw_orphaned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
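		/* A nonzero "end" asks ocfs2_del_inode_from_orphan() to also
		 * update i_size while it removes the orphan entry. */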
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) end = end > i_size_read(inode) ? end : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) !!end, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
	/* Attach the dealloc context to the extent tree, in case we reuse
	 * extents which have already been unlinked from the current extent
	 * tree by extent rotation and merging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) et.et_dealloc = &dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
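	/* No data clusters are needed here, only metadata. Marking a range
	 * written can split an unwritten extent, so reserve records for up
	 * to two splits per zeroed range. */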
	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) &data_ac, &meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) goto commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
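	/* Convert each cluster the direct I/O actually wrote from
	 * UNWRITTEN to written. */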
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) ret = ocfs2_mark_extent_written(inode, &et, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) ue->ue_cpos, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) ue->ue_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) meta_ac, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (end > i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (data_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) ocfs2_free_alloc_context(data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) ocfs2_dio_free_write_ctx(inode, dwc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * particularly interested in the aio/dio case. We use the rw_lock DLM lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * to protect io on one node from truncation on another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static int ocfs2_dio_end_io(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) ssize_t bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct inode *inode = file_inode(iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /* this io's submitter should not have unlocked this before we could */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (bytes <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) (long long)bytes);
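	/* "private" is the ocfs2_dio_write_ctxt stashed in the map_bh by
	 * ocfs2_dio_wr_get_block(); only writes set it. */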
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (bytes > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) ret = ocfs2_dio_end_io_write(inode, private, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) ocfs2_dio_free_write_ctx(inode, private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) ocfs2_iocb_clear_rw_locked(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) level = ocfs2_iocb_rw_locked_level(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) ocfs2_rw_unlock(inode, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) get_block_t *get_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) /*
	 * Fall back to buffered I/O if we see an inode without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) * extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
	/* Fall back to buffered I/O if we do not support append dio. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (iocb->ki_pos + iter->count > i_size_read(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) !ocfs2_supports_append_dio(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (iov_iter_rw(iter) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) get_block = ocfs2_lock_get_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) get_block = ocfs2_dio_wr_get_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) iter, get_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) ocfs2_dio_end_io, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) const struct address_space_operations ocfs2_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) .readpage = ocfs2_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) .readahead = ocfs2_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) .writepage = ocfs2_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) .write_begin = ocfs2_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) .write_end = ocfs2_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) .bmap = ocfs2_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) .direct_IO = ocfs2_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) .invalidatepage = block_invalidatepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) .releasepage = ocfs2_releasepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) .migratepage = buffer_migrate_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) .is_partially_uptodate = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) .error_remove_page = generic_error_remove_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) };