// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each page or THP when block size < page size
 * to track sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};
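
/*
 * Illustrative example (not part of the upstream file, numbers made up):
 * with a 4096-byte page over 1024-byte filesystem blocks, the page covers
 * four blocks, so bits 0-3 of uptodate[] track which 1K ranges hold valid
 * data.  A hypothetical helper to query one block might look like:
 *
 *	static bool example_block_uptodate(struct iomap_page *iop,
 *			unsigned int block)
 *	{
 *		return test_bit(block, iop->uptodate);
 *	}
 *
 * Writers of the bitmap in this file always hold uptodate_lock; readers
 * use lockless test_bit() calls.
 */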

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	/*
	 * per-block data is stored in the head page.  Callers should
	 * not be dealing with tail pages (and if they are, they can
	 * call thp_head() first).
	 */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);

	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);
	attach_page_private(page, iop);
	return iop;
}
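
/*
 * Sizing sketch (illustrative numbers): for a 64K THP over 4K blocks,
 * nr_blocks is 16 and BITS_TO_LONGS(16) is 1 on a 64-bit kernel, so the
 * allocation above works out to
 *
 *	sizeof(struct iomap_page) + 1 * sizeof(unsigned long)
 *
 * __GFP_NOFAIL spares the buffered I/O paths from having to unwind a
 * failed tracking allocation in the middle of an operation.
 */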

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = detach_page_private(page);
	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			PageUptodate(page));
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
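
/*
 * Worked example (illustrative numbers only): take a 4K page over 1K
 * blocks where blocks 0 and 3 are already uptodate, and a read of the
 * whole page (*pos page-aligned, length = 4096).  The leading loop skips
 * block 0, advancing *pos and poff by 1024 and shrinking plen to 3072;
 * the trailing loop then trims block 3, leaving poff = 1024 and
 * plen = 2048, so only blocks 1 and 2 are actually read.
 */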

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}
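
/*
 * For example (illustrative): a read over a hole comes back from the
 * filesystem as IOMAP_HOLE, an unwritten extent as IOMAP_UNWRITTEN, and a
 * just-allocated extent carries IOMAP_F_NEW.  In all of these cases there
 * is no valid on-disk data for the range, so the page cache is
 * zero-filled instead of issuing a bio.
 */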

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	/* Try to merge into a previous segment if we can */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
				&same_page))
			goto done;
		is_contig = true;
	}

	if (!is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
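
/*
 * Usage sketch (illustrative, not part of this file): a filesystem using
 * the iomap infrastructure wires this helper into its
 * address_space_operations, passing its own iomap_ops.  The names below
 * are hypothetical:
 *
 *	static int example_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_read_iomap_ops);
 *	}
 *
 * where example_read_iomap_ops supplies the ->iomap_begin/->iomap_end
 * callbacks that map file offsets to extents.
 */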

static loff_t
iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = readahead_page(ctx->rac);
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap, srcmap);
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct inode *inode = rac->mapping->host;
	loff_t pos = readahead_pos(rac);
	loff_t length = readahead_length(rac);
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(inode, readahead_count(rac));

	while (length > 0) {
		loff_t ret = iomap_apply(inode, pos, length, 0, ops,
				&ctx, iomap_readahead_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			break;
		}
		pos += ret;
		length -= ret;
	}

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
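
/*
 * Usage sketch (illustrative, hypothetical names): the readahead hook is
 * even simpler than ->readpage, since all the state lives in @rac:
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_read_iomap_ops);
 *	}
 */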

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
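
/*
 * Putting it together (illustrative, hypothetical names): the exported
 * helpers above are meant to be used directly, or behind one-line
 * wrappers, in a filesystem's address_space_operations, much as XFS
 * does:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage		= example_readpage,
 *		.readahead		= example_readahead,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.migratepage		= iomap_migrate_page,
 *	};
 *
 * (iomap.h is expected to define iomap_migrate_page as NULL when
 * CONFIG_MIGRATION is disabled, so no #ifdef is needed at the use site.)
 */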

enum {
	IOMAP_WRITE_F_UNSHARE	= (1 << 0),
};

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
		struct page *page, struct iomap *srcmap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_page_sync(block_start, page,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(page, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}
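
/*
 * Example of why the read-modify-write above is needed (illustrative
 * numbers): a write of bytes 100-199 into an uncached page over 1K
 * blocks only dirties part of block 0, so the rest of that block must
 * first be read in (or zeroed, for holes and new extents) before the
 * copy; otherwise stale data around the written range could later be
 * exposed when the block is marked uptodate.
 */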

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);
	if (srcmap != iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
			AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (srcmap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, srcmap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(inode, pos, len, flags, page,
				srcmap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}
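
/*
 * Hook sketch (illustrative, hypothetical names): a filesystem can attach
 * per-page callbacks via iomap->page_ops from its ->iomap_begin, e.g. to
 * bracket each page with a transaction, assuming the page_prepare and
 * page_done signatures used by the calls above:
 *
 *	static int example_page_prepare(struct inode *inode, loff_t pos,
 *			unsigned len, struct iomap *iomap)
 *	{
 *		return example_trans_begin(inode->i_sb);
 *	}
 *
 *	static void example_page_done(struct inode *inode, loff_t pos,
 *			unsigned copied, struct page *page,
 *			struct iomap *iomap)
 *	{
 *		example_trans_end(inode->i_sb);
 *	}
 *
 *	static const struct iomap_page_ops example_page_ops = {
 *		.page_prepare	= example_page_prepare,
 *		.page_done	= example_page_done,
 *	};
 *
 * gfs2 uses this mechanism in a similar way.
 */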

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, size_t copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	flush_dcache_page(page);
	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page, struct iomap *iomap,
		struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
				srcmap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
				srcmap);

		cond_resched();

		iov_iter_advance(i, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (unlikely(copied == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * If we were unable to copy any data at all, we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * fall back to a single segment length write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * If we didn't fall back here, we could livelock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * because not all segments in the iov can be copied at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * once without a pagefault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) bytes = min_t(unsigned long, PAGE_SIZE - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) iov_iter_single_seg_count(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pos += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) written += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) length -= copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) balance_dirty_pages_ratelimited(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) } while (iov_iter_count(i) && length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return written ? written : status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) const struct iomap_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct inode *inode = iocb->ki_filp->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) loff_t pos = iocb->ki_pos, ret = 0, written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) while (iov_iter_count(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ret = iomap_apply(inode, pos, iov_iter_count(iter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) IOMAP_WRITE, ops, iter, iomap_write_actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) pos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) written += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return written ? written : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
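/*
* Illustrative sketch (not part of this file): a typical ->write_iter
* implementation built on iomap_file_buffered_write(). Note that this
* helper does not update iocb->ki_pos; the caller is expected to. The
* myfs_* names and myfs_iomap_ops are hypothetical.
*
*	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
*			struct iov_iter *from)
*	{
*		struct inode *inode = file_inode(iocb->ki_filp);
*		ssize_t ret;
*
*		inode_lock(inode);
*		ret = generic_write_checks(iocb, from);
*		if (ret > 0)
*			ret = iomap_file_buffered_write(iocb, from,
*					&myfs_iomap_ops);
*		inode_unlock(inode);
*
*		if (ret > 0) {
*			iocb->ki_pos += ret;
*			ret = generic_write_sync(iocb, ret);
*		}
*		return ret;
*	}
*/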
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static loff_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct iomap *iomap, struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) long status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) loff_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /* don't bother with blocks that are not shared to start with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!(iomap->flags & IOMAP_F_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /* don't bother with holes or unwritten extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned long offset = offset_in_page(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) status = iomap_write_begin(inode, pos, bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (unlikely(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (WARN_ON_ONCE(status == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pos += status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) written += status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) length -= status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) balance_dirty_pages_ratelimited(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) } while (length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) const struct iomap_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) loff_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) iomap_unshare_actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) pos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) EXPORT_SYMBOL_GPL(iomap_file_unshare);
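/*
* Filesystems with reflink support typically use this helper to implement
* fallocate(FALLOC_FL_UNSHARE_RANGE): each shared block in the range is
* read in through the srcmap and redirtied so that writeback allocates a
* private copy, without changing the visible file contents.
*/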
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct iomap *iomap, struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned offset = offset_in_page(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) zero_user(page, offset, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) mark_page_accessed(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) loff_t length, void *data, struct iomap *iomap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) bool *did_zero = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) loff_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* already zeroed? we're done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) s64 bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (IS_DAX(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) bytes = dax_iomap_zero(pos, length, iomap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) bytes = iomap_zero(inode, pos, length, iomap, srcmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (bytes < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) pos += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) length -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) written += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (did_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *did_zero = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) } while (length > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) const struct iomap_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) loff_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ops, did_zero, iomap_zero_range_actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) EXPORT_SYMBOL_GPL(iomap_zero_range);
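/*
* A common use is zeroing from the old EOF when a file grows, so that
* stale block contents past the old size are never exposed. A minimal
* sketch, assuming the caller's own myfs_iomap_ops:
*
*	bool did_zero = false;
*
*	error = iomap_zero_range(inode, oldsize, newsize - oldsize,
*			&did_zero, &myfs_iomap_ops);
*/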
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) const struct iomap_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned int blocksize = i_blocksize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) unsigned int off = pos & (blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* Block boundary? Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) EXPORT_SYMBOL_GPL(iomap_truncate_page);
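/*
* Worked example: with a 4096-byte block size and pos == 6000, off is
* 6000 & 4095 == 1904, so the remaining 4096 - 1904 == 2192 bytes of the
* block are zeroed; a block-aligned pos is a no-op.
*/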
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static loff_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) void *data, struct iomap *iomap, struct iomap *srcmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct page *page = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ret = __block_write_begin_int(page, pos, length, NULL, iomap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) block_commit_write(page, 0, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) WARN_ON_ONCE(!PageUptodate(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) iomap_page_create(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct inode *inode = file_inode(vmf->vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) unsigned long length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) loff_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = page_mkwrite_check_truncate(page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) length = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) offset = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) while (length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ret = iomap_apply(inode, offset, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) IOMAP_WRITE | IOMAP_FAULT, ops, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) iomap_page_mkwrite_actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) length -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) wait_for_stable_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return VM_FAULT_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return block_page_mkwrite_return(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
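/*
* Illustrative sketch (not part of this file): wiring the fault path up
* through vm_operations_struct so that mmap writes go through
* iomap_page_mkwrite(). Real implementations usually also wrap this in
* sb_start_pagefault()/sb_end_pagefault() and call file_update_time().
* The myfs_* names are hypothetical.
*
*	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
*	{
*		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
*	}
*
*	static const struct vm_operations_struct myfs_file_vm_ops = {
*		.fault		= filemap_fault,
*		.map_pages	= filemap_map_pages,
*		.page_mkwrite	= myfs_page_mkwrite,
*	};
*/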
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) iomap_finish_page_writeback(struct inode *inode, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int error, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct iomap_page *iop = to_iomap_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) mapping_set_error(inode->i_mapping, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * We're now finished for good with this ioend structure. Update the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * state, release holds on bios, and finally free up memory. Do not use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * ioend after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) iomap_finish_ioend(struct iomap_ioend *ioend, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct inode *inode = ioend->io_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct bio *bio = &ioend->io_inline_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct bio *last = ioend->io_bio, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) u64 start = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) loff_t offset = ioend->io_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bool quiet = bio_flagged(bio, BIO_QUIET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) for (bio = &ioend->io_inline_bio; bio; bio = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct bio_vec *bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct bvec_iter_all iter_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * For the last bio, bi_private points to the ioend, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * need to explicitly end the iteration here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (bio == last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) next = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* walk each page on bio, ending page IO on them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) bio_for_each_segment_all(bv, bio, iter_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) iomap_finish_page_writeback(inode, bv->bv_page, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) bv->bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* The ioend has been freed by bio_put() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (unlikely(error && !quiet)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) printk_ratelimited(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) "%s: writeback error on inode %lu, offset %lld, sector %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) inode->i_sb->s_id, inode->i_ino, offset, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) iomap_finish_ioends(struct iomap_ioend *ioend, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct list_head tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) list_replace_init(&ioend->io_list, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) iomap_finish_ioend(ioend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) while (!list_empty(&tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) list_del_init(&ioend->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) iomap_finish_ioend(ioend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) EXPORT_SYMBOL_GPL(iomap_finish_ioends);
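/*
* Note that iomap_writepage_end_bio() below runs in bio completion context.
* Filesystems that must do transactional work on I/O completion (e.g.
* unwritten extent conversion) typically redirect completion to a workqueue
* via ->prepare_ioend and then call iomap_finish_ioends() from process
* context.
*/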
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * We can merge two adjacent ioends if they have the same set of work to do.
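* "The same set of work" means the same completion processing: matching I/O
* status, matching IOMAP_F_SHARED handling and matching unwritten extent
* conversion requirements, for ranges that are adjacent in file offset.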
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (ioend->io_bio->bi_status != next->io_bio->bi_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if ((ioend->io_flags & IOMAP_F_SHARED) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) (next->io_flags & IOMAP_F_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if ((ioend->io_type == IOMAP_UNWRITTEN) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) (next->io_type == IOMAP_UNWRITTEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (ioend->io_offset + ioend->io_size != next->io_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) void (*merge_private)(struct iomap_ioend *ioend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct iomap_ioend *next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct iomap_ioend *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) INIT_LIST_HEAD(&ioend->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) io_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!iomap_ioend_can_merge(ioend, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) list_move_tail(&next->io_list, &ioend->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ioend->io_size += next->io_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (next->io_private && merge_private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) merge_private(ioend, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (ia->io_offset < ib->io_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (ia->io_offset > ib->io_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) iomap_sort_ioends(struct list_head *ioend_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) list_sort(NULL, ioend_list, iomap_ioend_compare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) EXPORT_SYMBOL_GPL(iomap_sort_ioends);
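/*
* iomap_ioend_try_merge() only looks at the head of the list and stops at
* the first non-mergeable entry, so completion lists are expected to be
* sorted with iomap_sort_ioends() first; only then do offset-adjacent
* ioends actually appear next to each other.
*/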
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void iomap_writepage_end_bio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct iomap_ioend *ioend = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * Submit the final bio for an ioend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * If @error is non-zero, it means that we have a situation where some part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * the submission process has failed after we have marked pages for writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * and unlocked them. In this situation, we need to fail the bio instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * submitting it. This typically only happens on a filesystem shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ioend->io_bio->bi_private = ioend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (wpc->ops->prepare_ioend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) error = wpc->ops->prepare_ioend(ioend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * If we are failing the IO now, just mark the ioend with an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * error and finish it. This will run IO completion immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * as there is only one reference to the ioend at this point in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ioend->io_bio->bi_status = errno_to_blk_status(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) bio_endio(ioend->io_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) submit_bio(ioend->io_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static struct iomap_ioend *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) loff_t offset, sector_t sector, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct iomap_ioend *ioend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) bio_set_dev(bio, wpc->iomap.bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) bio->bi_iter.bi_sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) bio->bi_write_hint = inode->i_write_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) wbc_init_bio(wbc, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) INIT_LIST_HEAD(&ioend->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ioend->io_type = wpc->iomap.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) ioend->io_flags = wpc->iomap.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ioend->io_inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ioend->io_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ioend->io_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ioend->io_private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ioend->io_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return ioend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * Allocate a new bio, and chain the old bio to the new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Note that we have to perform the chaining in this unintuitive order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * so that the bi_private linkage is set up in the right direction for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * traversal in iomap_finish_ioend().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static struct bio *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) iomap_chain_bio(struct bio *prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct bio *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) bio_copy_dev(new, prev);/* also copies over blkcg information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) new->bi_iter.bi_sector = bio_end_sector(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) new->bi_opf = prev->bi_opf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) new->bi_write_hint = prev->bi_write_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) bio_chain(prev, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) bio_get(prev); /* for iomap_finish_ioend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) submit_bio(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) (wpc->ioend->io_flags & IOMAP_F_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (wpc->iomap.type != wpc->ioend->io_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (sector != bio_end_sector(wpc->ioend->io_bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Test to see if we have an existing ioend structure that we could append to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * first, otherwise finish off the current ioend and start another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct writeback_control *wbc, struct list_head *iolist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) sector_t sector = iomap_sector(&wpc->iomap, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned len = i_blocksize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) unsigned poff = offset & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) bool merged, same_page = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (wpc->ioend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) list_add(&wpc->ioend->io_list, iolist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) &same_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (iop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) atomic_add(len, &iop->write_bytes_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (!merged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (bio_full(wpc->ioend->io_bio, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) wpc->ioend->io_bio =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) iomap_chain_bio(wpc->ioend->io_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) bio_add_page(wpc->ioend->io_bio, page, len, poff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) wpc->ioend->io_size += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) wbc_account_cgroup_owner(wbc, page, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * We implement an immediate ioend submission policy here to avoid needing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * chain multiple ioends and hence nest mempool allocations which can violate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * forward progress guarantees we need to provide. The current ioend we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * adding blocks to is cached on the writepage context, and if the new block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * does not append to the cached ioend it will create a new ioend and cache that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * If a new ioend is created and cached, the old ioend is returned and queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * locally for submission once the entire page is processed or an error has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * detected. While each ioend is submitted as soon as its construction is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * complete, batching optimisations are provided by higher level block plugging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * At the end of a writeback pass, there will be a cached ioend remaining on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * writepage context that the caller will need to submit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) iomap_writepage_map(struct iomap_writepage_ctx *wpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct writeback_control *wbc, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct page *page, u64 end_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct iomap_page *iop = to_iomap_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct iomap_ioend *ioend, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) unsigned len = i_blocksize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) u64 file_offset; /* file offset of page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int error = 0, count = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) LIST_HEAD(submit_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Walk through the page to find areas to write back. If we run off the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * end of the current map or find the current map invalid, grab a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) for (i = 0, file_offset = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) i++, file_offset += len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (iop && !test_bit(i, iop->uptodate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) error = wpc->ops->map_blocks(wpc, inode, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (wpc->iomap.type == IOMAP_HOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) &submit_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) WARN_ON_ONCE(!PageLocked(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) WARN_ON_ONCE(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) WARN_ON_ONCE(PageDirty(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * We cannot cancel the ioend directly here on error. We may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * already set other pages under writeback and hence we have to run I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * completion to mark the error state of the pages under writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (unlikely(error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * Let the filesystem know what portion of the current page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * failed to map. If the page hasn't been added to the ioend, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * won't be affected by I/O completion and we must unlock it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (wpc->ops->discard_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) wpc->ops->discard_page(page, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * Preserve the original error if there was one, otherwise catch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * submission errors here and propagate into subsequent ioend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * submissions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) list_del_init(&ioend->io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) error2 = iomap_submit_ioend(wpc, ioend, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (error2 && !error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) error = error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * We can end up here with no error and nothing to write only if we race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * with a partial page truncate on a sub-page block sized filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) mapping_set_error(page->mapping, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * Write out a dirty page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * For delalloc space on the page we need to allocate space and flush it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * For unwritten space on the page we need to start the conversion to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * regular allocated space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct iomap_writepage_ctx *wpc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pgoff_t end_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) u64 end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) loff_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * Refuse to write the page out if we are called from reclaim context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * This avoids stack overflows when called from deeply used stacks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * random callers for direct reclaim or memcg reclaim. We explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * allow reclaim from kswapd as the stack usage there is relatively low.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * This should never happen except in the case of a VM regression so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * warn about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) PF_MEMALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) goto redirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * Given that we do not allow direct reclaim to call us, we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * never be called in a recursive filesystem reclaim context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) goto redirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * Is this page beyond the end of the file?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * The page index is less than the end_index, adjust the end_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * to the highest offset that this page should represent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * -----------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * | file mapping | <EOF> |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * -----------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * | Page ... | Page N-2 | Page N-1 | Page N | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * ^--------------------------------^----------|--------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * | desired writeback range | see else |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * ---------------------------------^------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) offset = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) end_index = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (page->index < end_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * Check whether the page to write out is beyond or straddles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * i_size or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * -------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * | file mapping | <EOF> |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * -------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * ^--------------------------------^-----------|---------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * | | Straddles |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * ---------------------------------^-----------|--------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) unsigned offset_into_page = offset & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Skip the page if it is fully outside i_size, e.g. due to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * truncate operation that is in progress. We must redirty the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * page so that reclaim stops reclaiming it. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * iomap_vm_releasepage() is called on it and gets confused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * Note that end_index is an unsigned long. If the given offset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * greater than 16TB on a 32-bit system, checking whether the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * is fully outside i_size via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * "if (page->index >= end_index + 1)" would fail, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * "end_index + 1" evaluates to 0. The page would then be redirtied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * and written out repeatedly, resulting in an infinite loop in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * which the user program performing the operation hangs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * Instead, we verify this situation by checking whether the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * to write is totally beyond i_size, or whether its offset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * just equal to the EOF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (page->index > end_index ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) (page->index == end_index && offset_into_page == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) goto redirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * The page straddles i_size. It must be zeroed out on each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * and every writepage invocation because it may be mmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * "A file is mapped in multiples of the page size. For a file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * that is not a multiple of the page size, the remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * memory is zeroed when mapped, and writes to that region are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * not written out to the file."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) zero_user_segment(page, offset_into_page, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /* Adjust the end_offset to the end of file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) end_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) redirty:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) iomap_writepage(struct page *page, struct writeback_control *wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct iomap_writepage_ctx *wpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) const struct iomap_writeback_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) wpc->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ret = iomap_do_writepage(page, wbc, wpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (!wpc->ioend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return iomap_submit_ioend(wpc, wpc->ioend, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) EXPORT_SYMBOL_GPL(iomap_writepage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct iomap_writepage_ctx *wpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) const struct iomap_writeback_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) wpc->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!wpc->ioend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return iomap_submit_ioend(wpc, wpc->ioend, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) EXPORT_SYMBOL_GPL(iomap_writepages);
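/*
* Illustrative sketch (not part of this file): a ->writepages method built
* on iomap_writepages(). The writepage context usually lives on the stack,
* often embedded in a filesystem-private structure; the myfs_* names are
* hypothetical.
*
*	static int myfs_writepages(struct address_space *mapping,
*			struct writeback_control *wbc)
*	{
*		struct iomap_writepage_ctx wpc = { };
*
*		return iomap_writepages(mapping, wbc, &wpc,
*				&myfs_writeback_ops);
*	}
*/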
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
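/*
* The pool below is sized as 4 * (PAGE_SIZE / SECTOR_SIZE) bios, e.g.
* 4 * (4096 / 512) == 32 on a 4K-page system; the intent appears to be
* enough reserved bios to keep writeback making forward progress even at
* a worst case of one bio per 512-byte sector across several pages.
*/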
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static int __init iomap_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) offsetof(struct iomap_ioend, io_inline_bio),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) BIOSET_NEED_BVECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) fs_initcall(iomap_init);