// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

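/*
 * Complain loudly about a mapping that points at block zero, which is
 * almost always a sign of on-disk corruption, and fail the operation
 * with -EFSCORRUPTED.
 */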
static int
xfs_alert_fsblock_zero(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

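/*
 * Translate an xfs_bmbt_irec mapping into the struct iomap consumed by
 * the generic iomap code: classify it as a hole, delalloc, unwritten or
 * written mapping, convert the offsets to byte units, and point the
 * iomap at the right block and DAX devices.
 */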
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}

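/* Fill out an iomap that describes a hole covering the given file range. */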
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

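/*
 * Compute the file system block that ends a mapping request, clamped to
 * the maximum file size supported by this superblock.
 */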
static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

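/*
 * Return the stripe-based alignment to apply to allocations at EOF, or
 * zero if no such alignment is configured or the file is still smaller
 * than one alignment unit.
 */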
static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if end_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

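/*
 * Allocate real blocks for the given range of the file for direct I/O.
 * The transaction and quota reservations are set up here, the inode
 * lock is taken and dropped internally, and *imap is overwritten with
 * the mapping that was actually allocated.
 */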
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	int			quota_flag;
	uint			qblocks, resblks;
	unsigned int		resrtextents = 0;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	uint			tflags = 0;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * If the allocator returned no mappings, the range could not be
	 * allocated and we have run out of space.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

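/*
 * Decide whether a delalloc preallocation of alloc_blocks blocks needs
 * to be throttled against this quota type: throttle once the dquot has
 * a high watermark configured and the new reservation would reach its
 * preallocation low watermark.
 */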
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

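/*
 * Calculate how hard this dquot throttles the preallocation: squash it
 * entirely if the dquot is at its high watermark, otherwise derive a
 * shift from how close the reservation is to the watermark.  Only
 * overwrite *qblocks and *qshift if this quota throttles harder than
 * what the caller has accumulated so far, and clamp *qfreesp to the
 * space left under this quota.
 */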
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > MAXEXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to MAXEXTLEN
	 * rather than falling short due to things like stripe unit/width
	 * alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}

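/*
 * Convert unwritten extents in the given byte range to written.  Works
 * in a loop, converting one mapping at a time until the whole range is
 * covered, and optionally updates the in-core and on-disk file size.
 */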
int
xfs_iomap_write_unwritten(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			update_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb;
	xfs_filblks_t		count_fsb;
	xfs_filblks_t		numblks_fsb;
	int			nimaps;
	struct xfs_trans	*tp;
	struct xfs_bmbt_irec	imap;
	struct inode		*inode = VFS_I(ip);
	xfs_fsize_t		i_size;
	uint			resblks;
	int			error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
				XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		numblks_fsb = imap.br_blockcount;
		if (numblks_fsb == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

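/* Does this mapping need a real block allocation before I/O can proceed? */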
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

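/* Does a write to this mapping have to go through the COW fork? */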
static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

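/*
 * Pick and take the correct ILOCK mode for an iomap operation: shared
 * where possible, exclusive if the write may allocate COW blocks or the
 * extent list still has to be read in, and honouring IOMAP_NOWAIT by
 * never blocking on the lock.
 */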
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access; don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}

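/*
 * iomap ->iomap_begin method for direct writes: map already-allocated
 * blocks, set up COW blocks for reflinked files, or allocate new real
 * blocks to cover the requested range.
 */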
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) xfs_direct_write_iomap_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct xfs_bmbt_irec imap, cmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) int nimaps = 1, error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) bool shared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) u16 iomap_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) unsigned lockmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Writes that span EOF might trigger an IO size update on completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * so consider them to be dirty for the purposes of O_DSYNC even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * there is no other metadata changes pending or have been made here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (offset + length > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) iomap_flags |= IOMAP_F_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) error = xfs_ilock_for_iomap(ip, flags, &lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) &nimaps, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (imap_needs_cow(ip, flags, &imap, nimaps)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (flags & IOMAP_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /* may drop and re-acquire the ilock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) &lockmode, flags & IOMAP_DIRECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (shared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) goto out_found_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) end_fsb = imap.br_startoff + imap.br_blockcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) length = XFS_FSB_TO_B(mp, end_fsb) - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (imap_needs_alloc(inode, flags, &imap, nimaps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto allocate_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * NOWAIT IO needs to span the entire requested IO with a single map so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * that we avoid partial IO failures due to the rest of the IO range not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * covered by this map triggering an EAGAIN condition when it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * subsequently mapped and aborting the IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if ((flags & IOMAP_NOWAIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) !imap_spans_range(&imap, offset_fsb, end_fsb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) allocate_blocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (flags & IOMAP_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work that writeback
	 * does. This is a completely arbitrary number pulled out of thin air
	 * as a best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until the
	 * lower level functions are updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) length = min_t(loff_t, length, 1024 * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) end_fsb = xfs_iomap_end_fsb(mp, offset, length);
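	/*
	 * For illustration: with 4k pages the cap above is 1024 * 4096 bytes
	 * = 4 MiB, which also keeps end_fsb - offset_fsb comfortably within
	 * 32 bits for the lower level functions.
	 */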
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (offset + length > XFS_ISIZE(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) out_found_cow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (imap.br_startblock != HOLESTARTBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) const struct iomap_ops xfs_direct_write_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) .iomap_begin = xfs_direct_write_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) };
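/*
 * Usage sketch (assumed call site, argument list abridged): the XFS direct
 * IO write path hands this table to the generic iomap dio code, roughly:
 *
 *	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 *			&xfs_dio_write_ops, ...);
 *
 * so xfs_direct_write_iomap_begin() supplies one mapping per iteration of
 * the dio submission loop.
 */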
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) xfs_buffered_write_iomap_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) loff_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct xfs_bmbt_irec imap, cmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct xfs_iext_cursor icur, ccur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) xfs_fsblock_t prealloc_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bool eof = false, cow_eof = false, shared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int allocfork = XFS_DATA_FORK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* we can't use delayed allocations when using extent size hints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (xfs_get_extsz_hint(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return xfs_direct_write_iomap_begin(inode, offset, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) flags, iomap, srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ASSERT(!XFS_IS_REALTIME_INODE(ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) xfs_ilock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) XFS_STATS_INC(mp, xs_blk_mapw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Search the data fork first to look up our source mapping. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * always need the data fork map, as we have to return it to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * iomap code so that the higher level write code can read data in to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * perform read-modify-write cycles for unaligned writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) imap.br_startoff = end_fsb; /* fake hole until the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* We never need to allocate blocks for zeroing a hole. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent. This serves two purposes: first, it implements speculative
	 * preallocation using cowextsize, so that we also unshare blocks
	 * adjacent to shared blocks instead of just the shared blocks
	 * themselves. Second, the lookup in the extent list is generally
	 * faster than going out to the shared extent tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (xfs_is_cow_inode(ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!ip->i_cowfp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ASSERT(!xfs_is_reflink_inode(ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) xfs_ifork_init_cow(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) &ccur, &cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!cow_eof && cmap.br_startoff <= offset_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) trace_xfs_reflink_cow_found(ip, &cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) goto found_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
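	/*
	 * Example (hypothetical numbers): with a 32-block cowextsize hint, an
	 * earlier COW reservation rounded up to that hint may already cover
	 * this write's offset, so the lookup above finds it and we jump to
	 * found_cow without consulting the shared extent tree again.
	 */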
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (imap.br_startoff <= offset_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * For reflink files we may need a delalloc reservation when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * overwriting shared extents. This includes zeroing of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * existing extents that contain data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (!xfs_is_cow_inode(ip) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto found_imap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* Trim the mapping to the nearest shared extent boundary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) error = xfs_bmap_trim_cow(ip, &imap, &shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* Not shared? Just report the (potentially capped) extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!shared) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto found_imap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Fork all the shared blocks from our write offset until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * end of the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) allocfork = XFS_COW_FORK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) end_fsb = imap.br_startoff + imap.br_blockcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work that writeback does. This is a
		 * completely arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) count = min_t(loff_t, count, 1024 * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) end_fsb = xfs_iomap_end_fsb(mp, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (xfs_is_always_cow_inode(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) allocfork = XFS_COW_FORK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) error = xfs_qm_dqattach_locked(ip, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (eof && offset + count > XFS_ISIZE(ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * Determine the initial size of the preallocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * We clean up any extra preallocation when the file is closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) prealloc_blocks = mp->m_allocsize_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) offset, count, &icur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (prealloc_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) xfs_extlen_t align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) xfs_off_t end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) xfs_fileoff_t p_end_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) prealloc_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) align = xfs_eof_alignment(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) p_end_fsb = roundup_64(p_end_fsb, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) p_end_fsb = min(p_end_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ASSERT(p_end_fsb > offset_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) prealloc_blocks = p_end_fsb - end_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
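	/*
	 * Worked example for the block above (hypothetical geometry): with 4k
	 * blocks and a 64k allocsize (m_allocsize_log == 16), offset + count
	 * - 1 = 200000 aligns down to end_offset = 196608, i.e. fsb 48; with
	 * prealloc_blocks = 64, p_end_fsb starts at 112 before the optional
	 * EOF alignment and the s_maxbytes clamp are applied.
	 */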
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) end_fsb - offset_fsb, prealloc_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) allocfork == XFS_DATA_FORK ? &imap : &cmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) allocfork == XFS_DATA_FORK ? &icur : &ccur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) allocfork == XFS_DATA_FORK ? eof : cow_eof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) case -ENOSPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) case -EDQUOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* retry without any preallocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) trace_xfs_delalloc_enospc(ip, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (prealloc_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) prealloc_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (allocfork == XFS_COW_FORK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto found_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * them out if the write happens to fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) found_imap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) found_cow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (imap.br_startoff <= offset_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) xfs_trim_extent(&cmap, offset_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) imap.br_startoff - offset_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xfs_buffered_write_iomap_end(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ssize_t written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct iomap *iomap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) xfs_fileoff_t start_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) xfs_fileoff_t end_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (iomap->type != IOMAP_DELALLOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
 * Behave as if the write failed if the drop-writes error tag is set. Set
 * the NEW flag to force delalloc cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) iomap->flags |= IOMAP_F_NEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
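	/*
	 * Note (assumed tooling): XFS_ERRTAG_DROP_WRITES is a test hook,
	 * normally armed from userspace via sysfs, e.g.
	 * /sys/fs/xfs/<dev>/errortag/drop_writes; the write still returns
	 * success to the caller while the delalloc blocks are punched out
	 * below as if nothing had been written.
	 */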
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * start_fsb refers to the first unused block after a short write. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * nothing was written, round offset down to point at the first block in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * the range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (unlikely(!written))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) start_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) start_fsb = XFS_B_TO_FSB(mp, offset + written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) end_fsb = XFS_B_TO_FSB(mp, offset + length);
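	/*
	 * Worked example (hypothetical numbers, 4k blocks): offset = 10000,
	 * written = 3000 gives start_fsb = XFS_B_TO_FSB(13000) = 4, rounding
	 * up so the partially written block 3 is preserved; written = 0 gives
	 * start_fsb = XFS_B_TO_FSBT(10000) = 2, rounding down to the first
	 * block of the range.
	 */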
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Trim delalloc blocks if they were allocated by this write and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * didn't manage to write the whole range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) *
 * We don't need to care about racing delalloc as we hold the inode's
 * i_rwsem across the reserve/allocate/unreserve calls. If there are
 * delalloc blocks in the range, they are ours.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) XFS_FSB_TO_B(mp, end_fsb) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) end_fsb - start_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (error && !XFS_FORCED_SHUTDOWN(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) xfs_alert(mp, "%s: unable to clean up ino %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) __func__, ip->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) const struct iomap_ops xfs_buffered_write_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .iomap_begin = xfs_buffered_write_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .iomap_end = xfs_buffered_write_iomap_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) };
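/*
 * Usage sketch (assumed call site): the buffered write path passes this
 * table to the generic iomap code, roughly:
 *
 *	ret = iomap_file_buffered_write(iocb, from,
 *			&xfs_buffered_write_iomap_ops);
 *
 * xfs_buffered_write_iomap_begin() reserves delalloc space for each mapping
 * and xfs_buffered_write_iomap_end() punches out whatever a short or failed
 * copy-in left unused.
 */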
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) xfs_read_iomap_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct xfs_bmbt_irec imap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) int nimaps = 1, error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) bool shared = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unsigned lockmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) error = xfs_ilock_for_iomap(ip, flags, &lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) &nimaps, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!error && (flags & IOMAP_REPORT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) const struct iomap_ops xfs_read_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .iomap_begin = xfs_read_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) };
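/*
 * Usage sketch (assumed call sites, argument lists abridged): this table
 * serves the read-only mapping paths, e.g. buffered readpages and direct IO
 * reads:
 *
 *	error = iomap_readpage(page, &xfs_read_iomap_ops);
 *	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, ...);
 *
 * IOMAP_REPORT callers additionally see shared extents flagged with
 * IOMAP_F_SHARED.
 */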
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) xfs_seek_iomap_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) xfs_fileoff_t cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct xfs_iext_cursor icur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) struct xfs_bmbt_irec imap, cmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) unsigned lockmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) lockmode = xfs_ilock_data_map_shared(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent, we are done.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (imap.br_startoff <= offset_fsb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) data_fsb = imap.br_startoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * Fake a hole until the end of the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) data_fsb = xfs_iomap_end_fsb(mp, offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /*
 * If a COW fork extent covers the hole, report it, capped to the next
 * data fork extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (xfs_inode_has_cow_data(ip) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cow_fsb = cmap.br_startoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (data_fsb < cow_fsb + cmap.br_blockcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) end_fsb = min(end_fsb, data_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) xfs_trim_extent(&cmap, offset_fsb, end_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache pages backed
		 * by this extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) iomap->type = IOMAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * Else report a hole, capped to the next found data or COW extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) imap.br_blockcount = cow_fsb - offset_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) imap.br_blockcount = data_fsb - offset_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) imap.br_startoff = offset_fsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) imap.br_startblock = HOLESTARTBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) imap.br_state = XFS_EXT_NORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) xfs_trim_extent(&imap, offset_fsb, end_fsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) const struct iomap_ops xfs_seek_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .iomap_begin = xfs_seek_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) };
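/*
 * Usage sketch (assumed call site): llseek uses this table for
 * SEEK_HOLE/SEEK_DATA, e.g.:
 *
 *	offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
 *	offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
 *
 * Reporting COW-fork-only ranges as IOMAP_UNWRITTEN above makes the generic
 * code probe the page cache for dirty data over those ranges.
 */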
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) xfs_xattr_iomap_begin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct xfs_inode *ip = XFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct xfs_mount *mp = ip->i_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct xfs_bmbt_irec imap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int nimaps = 1, error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) unsigned lockmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) lockmode = xfs_ilock_attr_map_shared(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
	/* if there is no attribute fork or no extents, return ENOENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) &nimaps, XFS_BMAPI_ATTRFORK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) xfs_iunlock(ip, lockmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ASSERT(nimaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) const struct iomap_ops xfs_xattr_iomap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .iomap_begin = xfs_xattr_iomap_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) };
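/*
 * Usage sketch (assumed call site): only the FIEMAP_FLAG_XATTR path uses
 * this table, e.g.:
 *
 *	error = iomap_fiemap(inode, fieinfo, start, length,
 *			&xfs_xattr_iomap_ops);
 *
 * hence the -ENOENT shortcut above when the inode has no attribute fork
 * extents to report.
 */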