// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

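/*
 * f2fs maintains its own bio set so that bios for read/write I/O can
 * always be allocated; the pool is sized by the number of log types.
 */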
int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

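/*
 * Return true if writeback of @page must complete before the current
 * checkpoint can finish (i.e. it is accounted as F2FS_WB_CP_DATA):
 * meta/node pages, directory data, atomic- and quota-file data, and
 * pages being migrated by garbage collection.
 */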
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			page_private_gcing(page))
		return true;
	return false;
}

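/* Classify a read page for the in-flight read I/O counters. */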
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

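/*
 * Verify the bio's pages with fs-verity from a workqueue context, then
 * finish the bio. Compressed pages are skipped here because verity for
 * them is handled at the compression cluster level.
 */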
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

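/*
 * Run the post-read steps that need process context: decryption first,
 * then decompression, then hand off to the verity step (or finish the
 * bio if verity isn't enabled for it).
 */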
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

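/*
 * Completion handler for read bios. If decryption or decompression is
 * needed, the work is deferred to the per-sb post_read workqueue;
 * otherwise the bio proceeds straight to verification/completion.
 */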
static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}

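/*
 * Completion handler for write bios: releases dummy and bounce pages,
 * updates the per-type writeback counters, ends page writeback, and
 * wakes up a checkpoint waiting for F2FS_WB_CP_DATA writes to drain.
 */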
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

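/*
 * Map @blk_addr to the block device holding it on a multi-device
 * filesystem and, if @bio is given, point the bio at that device with
 * the address rebased to a device-relative sector.
 */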
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

/*
 * Return true if the bio's bdev is the same as the target device of
 * @blk_addr.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);

	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

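/*
 * Allocate a bio from f2fs_bioset for @fio, targeted at fio->new_blkaddr,
 * with the end_io handler and write hint chosen by the I/O direction.
 */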
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
	else if (fscrypt_inode_should_skip_dm_default_key(inode))
		bio_set_skip_dm_default_key(bio);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio) &&
			(bio_should_skip_dm_default_key(bio) ==
			 fscrypt_inode_should_skip_dm_default_key(inode));

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

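/*
 * Submit a bio to the block layer. In aligned-IO mode, DATA/NODE write
 * bios are first padded with zeroed dummy pages up to the I/O size
 * boundary; padding a NODE bio breaks the next-block address chain, so
 * SBI_NEED_CP is set to force a checkpoint on fsync.
 */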
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

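/*
 * Apply the per-temperature REQ_FUA/REQ_META bits configured in
 * sbi->data_io_flag or sbi->node_io_flag to this I/O's op_flags.
 */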
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

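/* Submit the bio currently being merged in @io, if any. */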
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

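/*
 * Return true if @bio holds a page of @inode, the exact @page, or a node
 * page owned by @ino; with no filter given, any bio counts as a match.
 * Bounce and compress control pages are resolved back to their pagecache
 * pages before comparison.
 */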
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

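/*
 * Submit the merged write bio for the given page type and temperature.
 * META_FLUSH, used by the checkpoint path, upgrades the I/O to a
 * synchronous preflush/FUA write unless barriers are disabled.
 */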
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located at the given block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

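/*
 * Return true if a page at @cur_blkaddr can be appended to @bio: the bio
 * must stay under max_io_bytes, the address must directly follow the
 * bio's last block, and both must target the same device.
 */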
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return __same_bdev(sbi, cur_blkaddr, bio);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* IO in the bio is aligned, but not enough vectors are left */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

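/*
 * Track a not-yet-submitted in-place-update bio on the per-temperature
 * bio list so later writes can find it and merge into it.
 */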
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

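/*
 * Try to merge @page into the in-flight IPU bio *bio found on the
 * per-temperature bio lists. If the page can't be merged (crypto context
 * mismatch or full bio), the old bio is submitted and -EAGAIN is
 * returned so the caller allocates a fresh one.
 */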
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

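/*
 * Submit a pending in-place-update bio: either the specific *bio passed
 * in or, given only @page, whichever tracked bio contains that page.
 * Any remaining reference held through *bio is released.
 */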
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct bio **bio, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) enum temp_type temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct bio *target = bio ? *bio : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct list_head *head = &io->bio_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct bio_entry *be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (list_empty(head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) f2fs_down_read(&io->bio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) list_for_each_entry(be, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) found = (target == be->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) found = __has_merged_page(be->bio, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) f2fs_up_read(&io->bio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) f2fs_down_write(&io->bio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) list_for_each_entry(be, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) found = (target == be->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) found = __has_merged_page(be->bio, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) target = be->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) del_bio_entry(be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) f2fs_up_write(&io->bio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) __submit_bio(sbi, target, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (bio && *bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) bio_put(*bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
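
/*
 * Illustrative sketch (added for exposition, not part of the driver): the
 * function above is a two-phase scan.  A cheap read-locked pass only asks
 * "is a matching bio queued at this temperature?"; only on a hit is the
 * lock retaken as a writer and the list re-scanned before detaching, since
 * it may have changed between the two lockings.  The same shape in
 * miniature, reusing only helpers defined in this file:
 */
#if 0	/* example only, excluded from the build */
static bool example_detach_if_present(struct f2fs_bio_info *io,
					struct bio *target)
{
	struct bio_entry *be;
	bool hit = false;

	f2fs_down_read(&io->bio_list_lock);
	list_for_each_entry(be, &io->bio_list, list) {
		if (be->bio == target) {
			hit = true;
			break;
		}
	}
	f2fs_up_read(&io->bio_list_lock);

	if (!hit)
		return false;

	hit = false;
	f2fs_down_write(&io->bio_list_lock);
	list_for_each_entry(be, &io->bio_list, list) {
		if (be->bio == target) {
			del_bio_entry(be);	/* safe under the write lock */
			hit = true;
			break;
		}
	}
	f2fs_up_write(&io->bio_list_lock);
	return hit;
}
#endif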
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) int f2fs_merge_page_bio(struct f2fs_io_info *fio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct bio *bio = *fio->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct page *page = fio->encrypted_page ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) fio->encrypted_page : fio->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) trace_f2fs_submit_page_bio(page, fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) fio->new_blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) alloc_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (!bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) bio = __bio_alloc(fio, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) __attach_io_flag(fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) fio->page->index, fio, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) bio_set_op_attrs(bio, fio->op, fio->op_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) add_bio_entry(fio->sbi, bio, page, fio->temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (add_ipu_page(fio, &bio, page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto alloc_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (fio->io_wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) inc_page_count(fio->sbi, WB_DATA_TYPE(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *fio->last_block = fio->new_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *fio->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
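
/*
 * Illustrative sketch (hypothetical caller, added for exposition): how a
 * writeback loop is expected to thread state through f2fs_merge_page_bio().
 * The caller owns one bio and one last-block cursor across many pages and
 * hands pointers to both via fio, so consecutive blocks keep merging into
 * the same cached bio until something forces a flush.
 */
#if 0	/* example only, excluded from the build */
static void example_ipu_writeback(struct f2fs_io_info *fio)
{
	struct bio *bio = NULL;
	block_t last_block = 0;

	fio->bio = &bio;
	fio->last_block = &last_block;

	/* for each dirty page: set fio->page and fio->new_blkaddr, then: */
	if (f2fs_merge_page_bio(fio))
		return;	/* e.g. -EFSCORRUPTED; real callers handle this */

	/* after the last page, flush whatever is still queued: */
	f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
}
#endif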
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) void f2fs_submit_page_write(struct f2fs_io_info *fio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct f2fs_sb_info *sbi = fio->sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct page *bio_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) f2fs_bug_on(sbi, is_read_io(fio->op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) f2fs_down_write(&io->io_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (fio->in_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) spin_lock(&io->io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (list_empty(&io->io_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_unlock(&io->io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) fio = list_first_entry(&io->io_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct f2fs_io_info, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) list_del(&fio->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) spin_unlock(&io->io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) verify_fio_blkaddr(fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (fio->encrypted_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) bio_page = fio->encrypted_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) else if (fio->compressed_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bio_page = fio->compressed_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bio_page = fio->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* set submitted = true as a return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) fio->submitted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) inc_page_count(sbi, WB_DATA_TYPE(bio_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (io->bio &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) fio->new_blkaddr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) bio_page->index, fio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) __submit_merged_bio(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) alloc_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (io->bio == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (F2FS_IO_ALIGNED(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) (fio->type == DATA || fio->type == NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dec_page_count(sbi, WB_DATA_TYPE(bio_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) fio->retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) bio_page->index, fio, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) io->fio = *fio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) __submit_merged_bio(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto alloc_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (fio->io_wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) io->last_block_in_bio = fio->new_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) trace_f2fs_submit_page_write(fio->page, fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (fio->in_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) !f2fs_is_checkpoint_ready(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) __submit_merged_bio(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) f2fs_up_write(&io->io_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
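
/*
 * Illustrative sketch (added for exposition, locking elided): when
 * fio->in_list is set, the next/skip/out labels above amount to draining
 * io->io_list one request at a time while io_rwsem is held:
 */
#if 0	/* example only, excluded from the build */
	while (!list_empty(&io->io_list)) {
		fio = list_first_entry(&io->io_list,
					struct f2fs_io_info, list);
		list_del(&fio->list);
		/* merge fio's page into io->bio, or open a new bio */
	}
	/* the real loop takes io->io_lock around each list operation */
#endif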
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) unsigned nr_pages, unsigned op_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) pgoff_t first_idx, bool for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct bio_post_read_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) unsigned int post_read_steps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) min_t(int, nr_pages, BIO_MAX_PAGES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) &f2fs_bioset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (!bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) f2fs_target_device(sbi, blkaddr, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) bio->bi_end_io = f2fs_read_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (fscrypt_inode_uses_fs_layer_crypto(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) post_read_steps |= STEP_DECRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (f2fs_need_verity(inode, first_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) post_read_steps |= STEP_VERITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * STEP_DECOMPRESS is handled specially, since a compressed file might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * contain both compressed and uncompressed clusters. We'll allocate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * bio_post_read_ctx if the file is compressed, but the caller is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * responsible for enabling STEP_DECOMPRESS if it's actually needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (post_read_steps || f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* Due to the mempool, this never fails. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ctx->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ctx->sbi = sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ctx->enabled_steps = post_read_steps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) bio->bi_private = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
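
/*
 * Worked example (added for exposition): enabled_steps is a plain bitmask,
 * so a file that uses fs-layer fscrypt and is also verity-protected ends up
 * with ctx->enabled_steps == (STEP_DECRYPT | STEP_VERITY); the post-read
 * completion consumes those bits with decryption before verity, since
 * verity must check plaintext.
 */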
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* This can handle encrypted pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static int f2fs_submit_page_read(struct inode *inode, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) block_t blkaddr, int op_flags, bool for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) page->index, for_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (IS_ERR(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return PTR_ERR(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* wait for GCed page writeback via META_MAPPING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) f2fs_wait_on_block_writeback(inode, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) inc_page_count(sbi, F2FS_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) __submit_bio(sbi, bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void __set_data_blkaddr(struct dnode_of_data *dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct f2fs_node *rn = F2FS_NODE(dn->node_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) __le32 *addr_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) base = get_extra_isize(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Get physical address of data block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) addr_array = blkaddr_in_node(rn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
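
/*
 * Worked example (hypothetical values, assuming get_extra_isize() returns
 * the extra size in __le32 words): with 16 bytes of extra attributes,
 * base = 16 / 4 = 4, so the address for dn->ofs_in_node = 2 is written to
 * addr_array[4 + 2] = addr_array[6].
 */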
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * Lock ordering for the change of data block address:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * ->data_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * ->node_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * update block addresses in the node page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) __set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (set_page_dirty(dn->node_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dn->data_blkaddr = blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) f2fs_set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) f2fs_update_extent_cache(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /* dn->ofs_in_node will be returned advanced past the last block processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dn->ofs_in_node, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) for (; count > 0; dn->ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) block_t blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (blkaddr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) dn->data_blkaddr = NEW_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) __set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (set_page_dirty(dn->node_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) dn->node_changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
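
/*
 * Worked example (hypothetical values): with block slots {NULL_ADDR, 500,
 * NULL_ADDR} at ofs_in_node 0..2 and count = 2, the loop above reserves
 * slots 0 and 2 (writing NEW_ADDR), skips the already-valid slot 1, and
 * returns with dn->ofs_in_node = 3, one past the last slot it visited.
 */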
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Should keep dn->ofs_in_node unchanged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int f2fs_reserve_new_block(struct dnode_of_data *dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) unsigned int ofs_in_node = dn->ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ret = f2fs_reserve_new_blocks(dn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dn->ofs_in_node = ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) bool need_put = !dn->inode_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (dn->data_blkaddr == NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) err = f2fs_reserve_new_block(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (err || need_put)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) f2fs_put_dnode(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
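
/*
 * Illustrative sketch (added for exposition): a typical call site.  With no
 * pre-held inode page, f2fs_reserve_block() looks up the dnode itself and,
 * since need_put is true, also releases it before returning:
 */
#if 0	/* example only, excluded from the build */
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);	/* dnode put internally */
#endif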
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct extent_info ei = {0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct inode *inode = dn->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (f2fs_lookup_extent_cache(inode, index, &ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) dn->data_blkaddr = ei.blk + index - ei.fofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return f2fs_reserve_block(dn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
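
/*
 * Worked example (hypothetical values): an extent entry {fofs = 100,
 * blk = 5000, len = 8} covers logical blocks 100..107, so a lookup for
 * index = 103 hits the cache and resolves to ei.blk + index - ei.fofs =
 * 5000 + 103 - 100 = 5003 without touching any dnode.
 */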
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int op_flags, bool for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct extent_info ei = {0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) page = f2fs_grab_cache_page(mapping, index, for_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (f2fs_lookup_extent_cache(inode, index, &ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dn.data_blkaddr = ei.blk + index - ei.fofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) DATA_GENERIC_ENHANCE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) goto put_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) goto put_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto put_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (dn.data_blkaddr != NEW_ADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dn.data_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) DATA_GENERIC_ENHANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto put_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * A new dentry page is allocated but not able to be written, since its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * new inode page couldn't be allocated due to -ENOSPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * In such a case, its blkaddr can remain as NEW_ADDR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * See f2fs_add_link -> f2fs_get_new_data_page ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * f2fs_init_inode_metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (dn.data_blkaddr == NEW_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) op_flags, for_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) goto put_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) put_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) page = find_get_page(mapping, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (page && PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) f2fs_put_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) page = f2fs_get_read_data_page(inode, index, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) wait_on_page_locked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) f2fs_put_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * If it tries to access a hole, return an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * The callers, functions in dir.c and GC, need to be able to tell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * whether this page exists or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) bool for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) page = f2fs_get_read_data_page(inode, index, 0, for_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* wait for read completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (unlikely(page->mapping != mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return ERR_PTR(-EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * Caller ensures that this data page is never allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * A new zero-filled data page is allocated in the page cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * f2fs_unlock_op().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * Note that ipage is set only by make_empty_dir, and if any error occurs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * ipage should be released by this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct page *f2fs_get_new_data_page(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct page *ipage, pgoff_t index, bool new_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) page = f2fs_grab_cache_page(mapping, index, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * before exiting, we should make sure ipage will be released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * if any error occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) f2fs_put_page(ipage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) set_new_dnode(&dn, inode, ipage, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) err = f2fs_reserve_block(&dn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!ipage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (dn.data_blkaddr == NEW_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* if ipage exists, blkaddr should be NEW_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) f2fs_bug_on(F2FS_I_SB(inode), ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) page = f2fs_get_lock_data_page(inode, index, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (new_i_size && i_size_read(inode) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ((loff_t)(index + 1) << PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
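
/*
 * Worked example (hypothetical values, 4 KiB pages): creating new page
 * index = 3 with new_i_size set must leave i_size at no less than
 * (3 + 1) << PAGE_SHIFT = 16384 bytes, which is what the got_it branch
 * above enforces.
 */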
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct f2fs_summary sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) block_t old_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) blkcnt_t count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) dn->data_blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (dn->data_blkaddr != NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) old_blkaddr = dn->data_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) &sum, seg_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) invalidate_mapping_pages(META_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) old_blkaddr, old_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) f2fs_invalidate_compress_page(sbi, old_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * i_size will be updated by direct_IO. Otherwise, we'll get stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * data from the unwritten block via dio_read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct inode *inode = file_inode(iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) bool direct_io = iocb->ki_flags & IOCB_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (map.m_len > map.m_lblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) map.m_len -= map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) map.m_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) map.m_next_extent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) map.m_may_create = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (direct_io) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) flag = f2fs_force_buffered_io(inode, iocb, from) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) F2FS_GET_BLOCK_PRE_AIO :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) F2FS_GET_BLOCK_PRE_DIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) goto map_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (f2fs_has_inline_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) flag = F2FS_GET_BLOCK_PRE_AIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) map_blocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) err = f2fs_map_blocks(inode, &map, 1, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (map.m_len > 0 && err == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!direct_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) set_inode_flag(inode, FI_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
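
/*
 * Worked example (hypothetical values, 4 KiB blocks, assuming
 * F2FS_BLK_ALIGN() rounds up and F2FS_BYTES_TO_BLK() rounds down): a
 * 10000-byte write at ki_pos = 6000 gives m_lblk = 2 (the first block
 * lying fully beyond ki_pos) and F2FS_BYTES_TO_BLK(16000) = 3, so only
 * m_len = 3 - 2 = 1 full block is preallocated; partial head and tail
 * blocks are left to the write path itself.
 */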
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (flag == F2FS_GET_BLOCK_PRE_AIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) f2fs_down_read(&sbi->node_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) f2fs_up_read(&sbi->node_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
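
/*
 * Illustrative sketch (added for exposition): lock and unlock must be
 * paired with the same flag, so that F2FS_GET_BLOCK_PRE_AIO consistently
 * maps to the node_change rwsem and every other flag to
 * f2fs_lock_op()/f2fs_unlock_op(), as f2fs_map_blocks() does:
 */
#if 0	/* example only, excluded from the build */
	f2fs_do_map_lock(sbi, flag, true);
	/* look up or allocate dnodes for this mapping */
	f2fs_do_map_lock(sbi, flag, false);
#endif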
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * f2fs_map_blocks() tries to find or build a mapping from contiguous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * logical blocks to physical blocks, and returns the result via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * f2fs_map_blocks structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) int create, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) unsigned int maxblocks = map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) pgoff_t pgofs, end_offset, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) int err = 0, ofs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) unsigned int ofs_in_node, last_ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) blkcnt_t prealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct extent_info ei = {0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) unsigned int start_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!maxblocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) map->m_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) map->m_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* it only supports block size == page size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) pgofs = (pgoff_t)map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) end = pgofs + maxblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) map->m_may_create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) goto next_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) map->m_pblk = ei.blk + pgofs - ei.fofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) map->m_flags = F2FS_MAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (map->m_next_extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) *map->m_next_extent = pgofs + map->m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* for hardware encryption, and to avoid potential issues in the future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (flag == F2FS_GET_BLOCK_DIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) f2fs_wait_on_block_writeback_range(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) map->m_pblk, map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) next_dnode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (map->m_may_create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) f2fs_do_map_lock(sbi, flag, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /* When reading holes, we need the node page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (flag == F2FS_GET_BLOCK_BMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) map->m_pblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * There is one exceptional case in which read_node_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * may return -ENOENT because the filesystem has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * shut down or hit cp_error, so force the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * to -EIO in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (map->m_may_create &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (map->m_next_pgofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) *map->m_next_pgofs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) f2fs_get_next_page_offset(&dn, pgofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (map->m_next_extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) *map->m_next_extent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) f2fs_get_next_page_offset(&dn, pgofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) start_pgofs = pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) prealloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) next_block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) blkaddr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (__is_valid_data_blkaddr(blkaddr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (__is_valid_data_blkaddr(blkaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* use out-of-place update for direct IO under LFS mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) map->m_may_create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) err = __allocate_data_block(&dn, map->m_seg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) blkaddr = dn.data_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) set_inode_flag(inode, FI_APPEND_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (flag == F2FS_GET_BLOCK_PRE_AIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (blkaddr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) prealloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) last_ofs_in_node = dn.ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) flag != F2FS_GET_BLOCK_DIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) err = __allocate_data_block(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) map->m_seg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) set_inode_flag(inode, FI_APPEND_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) map->m_flags |= F2FS_MAP_NEW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) blkaddr = dn.data_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (flag == F2FS_GET_BLOCK_BMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) map->m_pblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (flag == F2FS_GET_BLOCK_PRECACHE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (flag == F2FS_GET_BLOCK_FIEMAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) blkaddr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (map->m_next_pgofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) *map->m_next_pgofs = pgofs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (flag != F2FS_GET_BLOCK_FIEMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* for defragment case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (map->m_next_pgofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) *map->m_next_pgofs = pgofs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (flag == F2FS_GET_BLOCK_PRE_AIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (map->m_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /* preallocated unwritten block should be mapped for fiemap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (blkaddr == NEW_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) map->m_flags |= F2FS_MAP_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) map->m_flags |= F2FS_MAP_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) map->m_pblk = blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) map->m_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) } else if ((map->m_pblk != NEW_ADDR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) blkaddr == (map->m_pblk + ofs)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) flag == F2FS_GET_BLOCK_PRE_DIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) ofs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) map->m_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) dn.ofs_in_node++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) pgofs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /* preallocate blocks in batch for one dnode page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (flag == F2FS_GET_BLOCK_PRE_AIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) (pgofs == end || dn.ofs_in_node == end_offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) dn.ofs_in_node = ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) err = f2fs_reserve_new_blocks(&dn, prealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) map->m_len += dn.ofs_in_node - ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) dn.ofs_in_node = end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (pgofs >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) goto sync_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) else if (dn.ofs_in_node < end_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) goto next_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (flag == F2FS_GET_BLOCK_PRECACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (map->m_flags & F2FS_MAP_MAPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) unsigned int ofs = start_pgofs - map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) f2fs_update_extent_cache_range(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) start_pgofs, map->m_pblk + ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) map->m_len - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (map->m_may_create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) f2fs_do_map_lock(sbi, flag, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) f2fs_balance_fs(sbi, dn.node_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) goto next_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) sync_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	/* for hardware encryption: wait on writeback to avoid potential future issues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) f2fs_wait_on_block_writeback_range(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) map->m_pblk, map->m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (flag == F2FS_GET_BLOCK_PRECACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (map->m_flags & F2FS_MAP_MAPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) unsigned int ofs = start_pgofs - map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) f2fs_update_extent_cache_range(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) start_pgofs, map->m_pblk + ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) map->m_len - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (map->m_next_extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) *map->m_next_extent = pgofs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (map->m_may_create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) f2fs_do_map_lock(sbi, flag, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) f2fs_balance_fs(sbi, dn.node_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) trace_f2fs_map_blocks(inode, map, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
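/*
 * Check whether [pos, pos + len) is fully backed by already-allocated
 * blocks, i.e. whether a write to this range is a pure overwrite that
 * cannot require new block allocation.  Returns false on any hole,
 * lookup error, or when the range extends past i_size.
 */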
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) block_t last_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (pos + len > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) map.m_lblk = F2FS_BYTES_TO_BLK(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) map.m_next_extent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) map.m_may_create = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) last_lblk = F2FS_BLK_ALIGN(pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) while (map.m_lblk < last_lblk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) map.m_len = last_lblk - map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (err || map.m_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) map.m_lblk += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
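/*
 * Byte <-> block conversions based on the inode's block size.  With the
 * default 4 KiB f2fs block (i_blkbits == 12), for example,
 * bytes_to_blks(inode, 8192) == 2 and blks_to_bytes(inode, 2) == 8192.
 */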
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return (bytes >> inode->i_blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return (blks << inode->i_blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
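/*
 * buffer_head-based wrapper around f2fs_map_blocks(): translate the
 * (iblock, bh->b_size) request into a map request and, on success, encode
 * the result back into the buffer_head: mapped state, physical block
 * address and the byte length of the mapped run.
 */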
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int __get_data_block(struct inode *inode, sector_t iblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct buffer_head *bh, int create, int flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) pgoff_t *next_pgofs, int seg_type, bool may_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) map.m_lblk = iblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) map.m_len = bytes_to_blks(inode, bh->b_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) map.m_next_pgofs = next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) map.m_next_extent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) map.m_seg_type = seg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) map.m_may_create = may_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) err = f2fs_map_blocks(inode, &map, create, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) map_bh(bh, inode->i_sb, map.m_pblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) bh->b_size = blks_to_bytes(inode, map.m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
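/*
 * Direct I/O variants: both map blocks with F2FS_GET_BLOCK_DIO and a
 * segment type derived from the inode's write hint; only the _write
 * variant sets may_write, allowing missing blocks to be allocated.
 */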
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) struct buffer_head *bh_result, int create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return __get_data_block(inode, iblock, bh_result, create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) F2FS_GET_BLOCK_DIO, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) f2fs_rw_hint_to_seg_type(inode->i_write_hint),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static int get_data_block_dio(struct inode *inode, sector_t iblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct buffer_head *bh_result, int create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return __get_data_block(inode, iblock, bh_result, create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) F2FS_GET_BLOCK_DIO, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) f2fs_rw_hint_to_seg_type(inode->i_write_hint),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
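/*
 * Report the on-disk location of this inode's extended attributes via
 * fiemap: first the inline xattr area at the tail of the inode block
 * (if any), then the dedicated xattr node block (if any).  Whichever
 * extent is emitted last carries FIEMAP_EXTENT_LAST.
 */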
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static int f2fs_xattr_fiemap(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) struct fiemap_extent_info *fieinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) __u64 phys = 0, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) __u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) nid_t xnid = F2FS_I(inode)->i_xattr_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (f2fs_has_inline_xattr(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) inode->i_ino, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) phys = blks_to_bytes(inode, ni.blk_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) offset = offsetof(struct f2fs_inode, i_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) sizeof(__le32) * (DEF_ADDRS_PER_INODE -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) get_inline_xattr_addrs(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) phys += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) len = inline_xattr_size(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (!xnid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) flags |= FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (xnid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) err = f2fs_get_node_info(sbi, xnid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) phys = blks_to_bytes(inode, ni.blk_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) len = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) flags = FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (phys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) return (err < 0 ? err : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
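/*
 * Upper bound on the number of data blocks addressable from one inode:
 * the direct pointers in the inode itself plus two direct node blocks,
 * two indirect node blocks and one double-indirect node block:
 *
 *   ADDRS_PER_INODE + 2 * ADDRS_PER_BLOCK
 *     + 2 * NIDS_PER_BLOCK * ADDRS_PER_BLOCK
 *     + NIDS_PER_BLOCK * NIDS_PER_BLOCK * ADDRS_PER_BLOCK
 */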
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) static loff_t max_inode_blocks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) loff_t result = ADDRS_PER_INODE(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) loff_t leaf_count = ADDRS_PER_BLOCK(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* two direct node blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) result += (leaf_count * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* two indirect node blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) leaf_count *= NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) result += (leaf_count * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /* one double indirect node block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) leaf_count *= NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) result += leaf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
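/*
 * fiemap implementation: walk the file in mapped runs returned by
 * f2fs_map_blocks(F2FS_GET_BLOCK_FIEMAP), emitting one fiemap extent per
 * run.  A hole advances start_blk to the next mapped offset; the blocks
 * of a compressed cluster are accumulated via count_in_cluster and
 * reported as a single FIEMAP_EXTENT_ENCODED extent.
 */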
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) sector_t start_blk, last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) pgoff_t next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) u64 logical = 0, phys = 0, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) bool compr_cluster = false, compr_appended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) unsigned int count_in_cluster = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) loff_t maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ret = f2fs_precache_extents(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (start > maxbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) ret = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (len > maxbytes || (maxbytes - len) < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) len = maxbytes - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ret = f2fs_xattr_fiemap(inode, fieinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (bytes_to_blks(inode, len) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) len = blks_to_bytes(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) start_blk = bytes_to_blks(inode, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) last_blk = bytes_to_blks(inode, start + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) memset(&map, 0, sizeof(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) map.m_lblk = start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) map.m_len = bytes_to_blks(inode, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) map.m_next_pgofs = &next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (compr_cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) map.m_lblk += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) map.m_len = cluster_size - count_in_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* HOLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) start_blk = next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) max_inode_blocks(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) goto prep_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) flags |= FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) compr_appended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	/* In the case of a compressed cluster, append this to the last extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) !(map.m_flags & F2FS_MAP_FLAGS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) compr_appended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) goto skip_fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) flags |= FIEMAP_EXTENT_MERGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (IS_ENCRYPTED(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) ret = fiemap_fill_next_extent(fieinfo, logical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) phys, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (start_blk > last_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) skip_fill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (map.m_pblk == COMPRESS_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) compr_cluster = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) count_in_cluster = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) } else if (compr_appended) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) unsigned int appended_blks = cluster_size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) count_in_cluster + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) size += blks_to_bytes(inode, appended_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) start_blk += appended_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) compr_cluster = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) logical = blks_to_bytes(inode, start_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) phys = __is_valid_data_blkaddr(map.m_pblk) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) blks_to_bytes(inode, map.m_pblk) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) size = blks_to_bytes(inode, map.m_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (compr_cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) flags = FIEMAP_EXTENT_ENCODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) count_in_cluster += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (count_in_cluster == cluster_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) compr_cluster = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) size += blks_to_bytes(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) flags = FIEMAP_EXTENT_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) start_blk += bytes_to_blks(inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) prep_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (fatal_signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
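/*
 * Reads are normally limited to i_size, but verity files keep their
 * Merkle tree pages beyond EOF, so those must stay readable; allow reads
 * up to the filesystem's maximum file size in that case.
 */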
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static inline loff_t f2fs_readpage_limit(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (IS_ENABLED(CONFIG_FS_VERITY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return inode->i_sb->s_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
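/*
 * Read one page: reuse the caller's cached mapping when the page falls
 * inside it, otherwise call f2fs_map_blocks() again.  Unmapped (hole or
 * post-EOF) pages are zeroed, optionally verity-verified, and unlocked
 * without I/O; mapped pages are appended to *bio_ret, submitting and
 * reallocating the bio whenever the new block is not physically or
 * cryptographically mergeable with it.
 */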
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static int f2fs_read_single_page(struct inode *inode, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) unsigned nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct f2fs_map_blocks *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) struct bio **bio_ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) sector_t *last_block_in_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) bool is_readahead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) struct bio *bio = *bio_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) const unsigned blocksize = blks_to_bytes(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) sector_t block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) sector_t last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) sector_t last_block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) sector_t block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) block_in_file = (sector_t)page_index(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) last_block = block_in_file + nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) last_block_in_file = bytes_to_blks(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) f2fs_readpage_limit(inode) + blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (last_block > last_block_in_file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) last_block = last_block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	/* just zero out the page, which is beyond EOF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (block_in_file >= last_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) goto zero_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * Map blocks using the previous result first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if ((map->m_flags & F2FS_MAP_MAPPED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) block_in_file > map->m_lblk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) block_in_file < (map->m_lblk + map->m_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * Then do more f2fs_map_blocks() calls until we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * done with this page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) map->m_lblk = block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) map->m_len = last_block - block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if ((map->m_flags & F2FS_MAP_MAPPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) block_nr = map->m_pblk + block_in_file - map->m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) SetPageMappedToDisk(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (!PageUptodate(page) && (!PageSwapCache(page) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) !cleancache_get_page(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) goto confused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) DATA_GENERIC_ENHANCE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) zero_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (f2fs_need_verity(inode, page->index) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) !fsverity_verify_page(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * This page will go to BIO. Do we need to send this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * BIO off first?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) *last_block_in_bio, block_nr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) submit_and_realloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) __submit_bio(F2FS_I_SB(inode), bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (!bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) is_readahead ? REQ_RAHEAD : 0, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (IS_ERR(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) ret = PTR_ERR(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) * If the page is under writeback, we need to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * its completion to see the correct decrypted data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) f2fs_wait_on_block_writeback(inode, block_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (bio_add_page(bio, page, blocksize, 0) < blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) goto submit_and_realloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) *last_block_in_bio = block_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) confused:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) __submit_bio(F2FS_I_SB(inode), bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) *bio_ret = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #ifdef CONFIG_F2FS_FS_COMPRESSION
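/*
 * Read one compressed cluster: drop rpages beyond EOF, validate the
 * cluster's block addresses, then queue each compressed page for read.
 * Pages already present in the compressed page cache skip the read;
 * decompression itself runs from the read-completion path once
 * dic->remaining_pages drops to zero.
 */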
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) unsigned nr_pages, sector_t *last_block_in_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) bool is_readahead, bool for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) struct inode *inode = cc->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct bio *bio = *bio_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) sector_t last_block_in_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) const unsigned blocksize = blks_to_bytes(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct decompress_io_ctx *dic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) last_block_in_file = bytes_to_blks(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) f2fs_readpage_limit(inode) + blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* get rid of pages beyond EOF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) struct page *page = cc->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if ((sector_t)page->index >= last_block_in_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) } else if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (for_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) cc->rpages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) cc->nr_rpages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) /* we are done since all pages are beyond EOF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (f2fs_cluster_is_empty(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) for (i = 1; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) blkaddr = data_blkaddr(dn.inode, dn.node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) dn.ofs_in_node + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (!__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) cc->nr_cpages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /* nothing to decompress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (cc->nr_cpages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) dic = f2fs_alloc_dic(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (IS_ERR(dic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) ret = PTR_ERR(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct page *page = dic->cpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct bio_post_read_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) blkaddr = data_blkaddr(dn.inode, dn.node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) dn.ofs_in_node + i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) f2fs_wait_on_block_writeback(inode, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (atomic_dec_and_test(&dic->remaining_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) f2fs_decompress_cluster(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (bio && (!page_is_mergeable(sbi, bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) *last_block_in_bio, blkaddr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) submit_and_realloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) __submit_bio(sbi, bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (!bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) is_readahead ? REQ_RAHEAD : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) page->index, for_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (IS_ERR(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) ret = PTR_ERR(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) f2fs_decompress_end_io(dic, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) *bio_ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (bio_add_page(bio, page, blocksize, 0) < blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) goto submit_and_realloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ctx = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) ctx->enabled_steps |= STEP_DECOMPRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) refcount_inc(&dic->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) inc_page_count(sbi, F2FS_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) *last_block_in_bio = blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) *bio_ret = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) out_put_dnode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (cc->rpages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ClearPageUptodate(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ClearPageError(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) unlock_page(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) *bio_ret = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * This function was originally taken from fs/mpage.c, and customized for f2fs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  * The major change is that block_size == page_size in f2fs by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) */
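/*
 * For compressed inodes, consecutive pages of one cluster are first
 * gathered in cc and handed to f2fs_read_multi_pages() whenever the next
 * page cannot join the current cluster (and once more for the last page).
 */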
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static int f2fs_mpage_readpages(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct readahead_control *rac, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct bio *bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) sector_t last_block_in_bio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) struct compress_ctx cc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) .inode = inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .cluster_size = F2FS_I(inode)->i_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) .cluster_idx = NULL_CLUSTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) .rpages = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) .cpages = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) .nr_rpages = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) .nr_cpages = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) unsigned nr_pages = rac ? readahead_count(rac) : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) unsigned max_nr_pages = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) map.m_pblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) map.m_lblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) map.m_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) map.m_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) map.m_next_extent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) map.m_may_create = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) for (; nr_pages; nr_pages--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (rac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) page = readahead_page(rac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) prefetchw(&page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			/* if there are remaining compressed pages, submit them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) ret = f2fs_read_multi_pages(&cc, &bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) max_nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) &last_block_in_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) rac != NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) f2fs_destroy_compress_ctx(&cc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) goto set_error_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) ret = f2fs_is_compressed_cluster(inode, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) goto set_error_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) else if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) goto read_single_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ret = f2fs_init_compress_ctx(&cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) goto set_error_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) f2fs_compress_ctx_add_page(&cc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) goto next_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) read_single_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) &bio, &last_block_in_bio, rac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) set_error_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) next_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (rac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) /* last page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) ret = f2fs_read_multi_pages(&cc, &bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) max_nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) &last_block_in_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) rac != NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) f2fs_destroy_compress_ctx(&cc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) __submit_bio(F2FS_I_SB(inode), bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static int f2fs_read_data_page(struct file *file, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) struct inode *inode = page_file_mapping(page)->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) int ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) trace_f2fs_readpage(page, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (!f2fs_is_compress_backend_ready(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /* If the file has inline data, try to read it directly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (f2fs_has_inline_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) ret = f2fs_read_inline_data(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) ret = f2fs_mpage_readpages(inode, NULL, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static void f2fs_readahead(struct readahead_control *rac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct inode *inode = rac->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!f2fs_is_compress_backend_ready(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	/* If the file has inline data, skip readahead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (f2fs_has_inline_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) f2fs_mpage_readpages(inode, rac, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
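/*
 * Encrypt one data page before writeback.  With inline (hardware) crypto
 * there is nothing to do in software; otherwise encrypt into a bounce
 * page (fio->encrypted_page), retrying with __GFP_NOFAIL on ENOMEM.  Any
 * cached copy of the old block in META_MAPPING is refreshed with the new
 * ciphertext so in-flight GC sees consistent contents.
 */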
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage, *page;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	page = fio->compressed_page ? fio->compressed_page : fio->page;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

	if (fscrypt_inode_uses_inline_crypto(inode))
		return 0;

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
					PAGE_SIZE, 0, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}

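/*
 * Editor's note: SM_I(sbi)->ipu_policy is a bitmask (tunable through the
 * "ipu_policy" sysfs entry) in which each F2FS_IPU_* constant below is a
 * bit index, so several triggers can be armed at once:
 *
 *	if (policy & (0x1 << F2FS_IPU_UTIL))	// utilization trigger armed
 *		// do IPU once the fs is fuller than min_ipu_util percent
 *
 * Any single matching trigger is sufficient; if none matches, the write
 * goes out-of-place in the usual log-structured fashion.
 */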
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * use IPU to rewrite asynchronously-written pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}

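/*
 * Editor's note: the two predicates below answer opposite questions, and
 * f2fs_should_update_outplace() wins on conflict (see need_inplace_update()
 * further down).  In-place update (IPU) rewrites an existing block address
 * and avoids fragmentation; out-of-place update (OPU) allocates a new block
 * in log-structured fashion, which is mandatory in LFS mode and for data
 * that a checkpoint or atomic write must be able to roll back.
 */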
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	/* swap file is migrating in aligned write mode */
	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
		return false;

	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* The below cases were checked when setting it. */
	if (f2fs_is_pinned_file(inode))
		return false;
	if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return true;
	if (f2fs_lfs_mode(sbi))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;

	/* swap file is migrating in aligned write mode */
	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
		return true;

	if (fio) {
		if (page_private_gcing(fio->page))
			return true;
		if (page_private_dummy(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}

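/*
 * Editor's sketch of f2fs_do_write_data_page() (descriptive only):
 *
 *   1. Extent-cache hit while in-place update is allowed -> reuse the old
 *      block address directly ("ipu_force"), skipping the dnode lookup.
 *   2. Otherwise look up the dnode to learn old_blkaddr; NULL_ADDR means
 *      the page was truncated meanwhile and can be dropped.
 *   3. Valid old block + IPU policy -> f2fs_inplace_write_data();
 *      everything else -> f2fs_outplace_write_data() (LFS-style).
 *
 * f2fs_trylock_op() is used rather than f2fs_lock_op() because the page
 * lock is already held here, while checkpoint may hold cp_rwsem and wait
 * for page locks; bailing out with -EAGAIN lets the caller redirty and
 * retry instead of deadlocking.
 */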
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE))
			return -EFSCORRUPTED;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = f2fs_encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (fscrypt_inode_uses_fs_layer_crypto(inode))
				fscrypt_finalize_bounce_page(&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = f2fs_encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

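/*
 * Editor's note on the return contract below: 0 means the page was handled
 * (written, or legitimately skipped and cleaned); AOP_WRITEPAGE_ACTIVATE
 * means the page was redirtied and left for the VM to unlock; a negative
 * error is reported only when it must reach fsync().  An internal -EAGAIN
 * is absorbed by retrying f2fs_do_write_data_page() with a stronger lock
 * mode (LOCK_RETRY, then LOCK_REQ) rather than being returned.
 */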
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long)i_size)
							>> PAGE_SHIFT;
	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.compr_blocks = compr_blocks,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to let kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index ||
			f2fs_verity_in_progress(inode) ||
			compr_blocks)
		goto write;

	/*
	 * If the offset is beyond the end of file,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry/quota blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint.  This can only happen for quota writes,
		 * which can cause the discard race condition noted below.
		 */
		if (IS_NOQUOTA(inode))
			f2fs_down_read(&sbi->node_write);

		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);

		if (IS_NOQUOTA(inode))
			f2fs_up_read(&sbi->node_write);

		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		spin_lock(&F2FS_I(inode)->i_size_lock);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->cp_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted ? 1 : 0;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN into an error, i.e. it calls
	 * handle_write_error() -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will then see the EIO error, which is
	 * critical so that fsync() after an atomic_write failure returns it
	 * to the user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

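/*
 * Editor's note: a page belonging to a compressed cluster cannot be written
 * on its own here -- it is redirtied (AOP_WRITEPAGE_ACTIVATE) so that
 * f2fs_write_cache_pages() can later gather the whole cluster and write it
 * through f2fs_write_multi_pages().
 */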
static int f2fs_write_data_page(struct page *page,
				struct writeback_control *wbc)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = page->mapping->host;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		goto out;

	if (f2fs_compressed_file(inode)) {
		if (f2fs_is_compressed_cluster(inode, page->index)) {
			redirty_page_for_writepage(wbc, page);
			return AOP_WRITEPAGE_ACTIVATE;
		}
	}
out:
#endif

	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
						wbc, FS_DATA_IO, 0, true);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
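/*
 * Editor's note: for compressed inodes this walk batches dirty pages into a
 * struct compress_ctx; a page that cannot merge into the current cluster
 * forces f2fs_write_multi_pages() to flush what has been collected first,
 * and "need_readd" re-processes that page against a fresh cluster.  IPU
 * writes may also be cached in *bio here and are submitted as one merged
 * request at the end.
 */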
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0, retry = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;
	sector_t last_block;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = mapping->host;
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.nr_rpages = 0,
		.cpages = NULL,
		.rbuf = NULL,
		.cbuf = NULL,
		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
		.private = NULL,
	};
#endif
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;
	int submitted = 0;
	int i;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	retry = 0;
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && !retry && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool need_readd;
readd:
			need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				ret = f2fs_init_compress_ctx(&cc);
				if (ret) {
					done = 1;
					break;
				}

				if (!f2fs_cluster_can_merge_page(&cc,
								page->index)) {
					ret = f2fs_write_multi_pages(&cc,
						&submitted, wbc, io_type);
					if (!ret)
						need_readd = true;
					goto result;
				}

				if (unlikely(f2fs_cp_error(sbi)))
					goto lock_page;

				if (f2fs_cluster_is_empty(&cc)) {
					void *fsdata = NULL;
					struct page *pagep;
					int ret2;

					ret2 = f2fs_prepare_compress_overwrite(
							inode, &pagep,
							page->index, &fsdata);
					if (ret2 < 0) {
						ret = ret2;
						done = 1;
						break;
					} else if (ret2 &&
						!f2fs_compress_write_end(inode,
							fsdata, page->index,
							1)) {
						retry = 1;
						break;
					}
				} else {
					goto lock_page;
				}
			}
#endif
			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
#ifdef CONFIG_F2FS_FS_COMPRESSION
lock_page:
#endif
			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				get_page(page);
				f2fs_compress_ctx_add_page(&cc, page);
				continue;
			}
#endif
			ret = f2fs_write_single_data_page(page, &submitted,
					&bio, &last_block, wbc, io_type,
					0, true);
			if (ret == AOP_WRITEPAGE_ACTIVATE)
				unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
			nwritten += submitted;
			wbc->nr_to_write -= submitted;

			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					ret = 0;
					goto next;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
						goto retry_write;
					}
					goto next;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
next:
			if (need_readd)
				goto readd;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* flush remaining pages in the compress cluster */
	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
		nwritten += submitted;
		wbc->nr_to_write -= submitted;
		if (ret) {
			done = 1;
			retry = 0;
		}
	}
	if (f2fs_compressed_file(inode))
		f2fs_destroy_compress_ctx(&cc, false);
#endif
	if (retry) {
		index = 0;
		end = -1;
		goto retry;
	}
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);
	/* submit cached bio of IPU write */
	if (bio)
		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);

	return ret;
}

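/*
 * Editor's note: "serialize" here means taking sbi->writepages, a per-sb
 * mutex, around the whole f2fs_write_cache_pages() run so that large
 * writeback streams from different inodes do not interleave their blocks
 * on disk.  The checkpoint thread is exempt, since it may already be
 * flushing data with locks held.
 */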
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	/* to avoid deadlock in the data flush path */
	if (F2FS_I(inode)->cp_task)
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;

	if (f2fs_need_compress_data(inode))
		return true;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}

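/*
 * Editor's note: sbi->wb_sync_req[DATA] counts in-flight WB_SYNC_ALL
 * writers.  While it is non-zero, WB_SYNC_NONE background writeback backs
 * off (skip_write) so that sync writers make progress; any plugged requests
 * of the current task are flushed first, since a held plug could otherwise
 * stall the sync writer waiting on those bios.
 */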
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

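/*
 * Editor's note: called when a buffered or direct write fails after blocks
 * may already have been preallocated past i_size.  Everything beyond the
 * current i_size is truncated again, so a failed write leaves no stale
 * preallocation behind (except in the fs-verity case noted below, where
 * f2fs_end_enable_verity() is responsible).
 */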
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (IS_NOQUOTA(inode))
		return;

	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
	if (to > i_size && !f2fs_verity_in_progress(inode)) {
		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		f2fs_truncate_blocks(inode, i_size, true);

		f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}

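/*
 * Editor's sketch of prepare_write_begin() (descriptive only): resolve the
 * block address that backs @page before ->write_begin copies user data in.
 * Fast path: a full-page overwrite of an already-preallocated area needs no
 * lookup at all.  Otherwise inline data is either served from the inode
 * page or converted out, and new blocks are reserved under
 * f2fs_do_map_lock() (F2FS_GET_BLOCK_PRE_AIO) when the write may allocate.
 */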
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) static int prepare_write_begin(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) struct page *page, loff_t pos, unsigned len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) block_t *blk_addr, bool *node_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) pgoff_t index = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) struct page *ipage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) bool locked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) struct extent_info ei = {0,0,0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) * We already allocated all the blocks, so we don't need to fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) * the block addresses when there is no need to fill the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) !f2fs_verity_in_progress(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) /* f2fs_lock_op avoids a race between checkpoint writeback and convert_inline_page() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) flag = F2FS_GET_BLOCK_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) flag = F2FS_GET_BLOCK_PRE_AIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (f2fs_has_inline_data(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) (pos & PAGE_MASK) >= i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) f2fs_do_map_lock(sbi, flag, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) /* check inline_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) ipage = f2fs_get_node_page(sbi, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (IS_ERR(ipage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) err = PTR_ERR(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) set_new_dnode(&dn, inode, ipage, ipage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (f2fs_has_inline_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (pos + len <= MAX_INLINE_DATA(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) f2fs_do_read_inline_data(page, ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) set_inode_flag(inode, FI_DATA_EXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (inode->i_nlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) set_page_private_inline(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) err = f2fs_convert_inline_page(&dn, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (dn.data_blkaddr == NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) err = f2fs_get_block(&dn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) } else if (locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) err = f2fs_get_block(&dn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) if (f2fs_lookup_extent_cache(inode, index, &ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) dn.data_blkaddr = ei.blk + index - ei.fofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) /* hole case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) if (err || dn.data_blkaddr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) /* convert_inline_page can make node_changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) *blk_addr = dn.data_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) *node_changed = dn.node_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) f2fs_do_map_lock(sbi, flag, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
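/*
 * Worked example (hypothetical numbers) for the extent-cache hit in
 * prepare_write_begin() above: if a cached extent maps file offsets
 * [100, 108) to blocks [2000, 2008), i.e. ei.fofs = 100, ei.blk = 2000,
 * then index = 103 resolves to
 * dn.data_blkaddr = ei.blk + index - ei.fofs = 2000 + 103 - 100 = 2003.
 */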
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) static int f2fs_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) bool need_balance = false, drop_atomic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) block_t blkaddr = NULL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) * We should avoid quota operations here, since they can deadlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) * kswapd -> f2fs_evict_inode -> dquot_drop ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) * f2fs_dquot_commit -> f2fs_write_begin ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) * d_obtain_alias -> __d_alloc -> kmem_cache_alloc(GFP_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) if (trace_android_fs_datawrite_start_enabled() && !IS_NOQUOTA(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) path = android_fstrace_get_pathname(pathbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) MAX_TRACE_PATHBUF_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) trace_android_fs_datawrite_start(inode, pos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) current->pid, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) trace_f2fs_write_begin(inode, pos, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (!f2fs_is_checkpoint_ready(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) if ((f2fs_is_atomic_file(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) drop_atomic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) * We should check this at this moment to avoid deadlock on inode page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * and #0 page. The locking rule for inline_data conversion should be:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) * lock_page(page #0) -> lock_page(inode_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) if (index != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) }
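/*
 * A minimal sketch (not compiled) of the locking rule stated above,
 * using hypothetical page variables: data page #0 is always locked
 * before the inode page, never the other way around.
 */
#if 0
	lock_page(page);	/* data page #0 first */
	lock_page(ipage);	/* then the inode page */
	/* inline_data conversion work goes here */
	unlock_page(ipage);
	unlock_page(page);
#endif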
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) *fsdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) ret = f2fs_prepare_compress_overwrite(inode, pagep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) index, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) } else if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) * Do not use grab_cache_page_write_begin(), to avoid a deadlock due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) * wait_for_stable_page(). We do that wait below, under our own I/O control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) page = f2fs_pagecache_get_page(mapping, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) /* TODO: cluster can be compressed due to race with .writepage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) *pagep = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) err = prepare_write_begin(sbi, page, pos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) &blkaddr, &need_balance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) if (need_balance && !IS_NOQUOTA(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) has_not_enough_free_secs(sbi, 0, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) if (page->mapping != mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) /* The page got truncated from under us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) f2fs_wait_on_page_writeback(page, DATA, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if (len == PAGE_SIZE || PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) !f2fs_verity_in_progress(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) zero_user_segment(page, len, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) if (blkaddr == NEW_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) zero_user_segment(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) DATA_GENERIC_ENHANCE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) err = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) if (unlikely(page->mapping != mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) f2fs_write_failed(mapping, pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) if (drop_atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) f2fs_drop_inmem_pages_all(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
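/*
 * The revalidation pattern used twice in f2fs_write_begin() above, in
 * isolation (sketch, not compiled): whenever the page lock is dropped
 * and retaken, truncation may have raced in, so page->mapping must be
 * rechecked before the page is used again.
 */
#if 0
	unlock_page(page);
	/* anything that may sleep, e.g. f2fs_balance_fs() or a read */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		/* truncated from under us; start over with a fresh page */
		f2fs_put_page(page, 1);
		goto repeat;
	}
#endif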
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) static int f2fs_write_end(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) trace_android_fs_datawrite_end(inode, pos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) trace_f2fs_write_end(inode, pos, len, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) * This case should come from len == PAGE_SIZE, so we expect copied to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) * be PAGE_SIZE as well. Otherwise, we report zero copied and let
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) * generic_perform_write() retry the copy by passing back copied == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) if (unlikely(copied != len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) /* overwrite compressed file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (f2fs_compressed_file(inode) && fsdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) f2fs_compress_write_end(inode, fsdata, page->index, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (pos + copied > i_size_read(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) !f2fs_verity_in_progress(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) f2fs_i_size_write(inode, pos + copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (!copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) if (pos + copied > i_size_read(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) !f2fs_verity_in_progress(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) f2fs_i_size_write(inode, pos + copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) }
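/*
 * Sketch of the caller-side contract (simplified; not the verbatim
 * generic_perform_write() loop): when ->write_end() returns 0, the
 * generic write path re-faults the user buffer and retries the same
 * range instead of silently losing the partial copy.
 */
#if 0
again:
	/* ->write_begin() and the user-buffer copy are elided here */
	copied = a_ops->write_end(file, mapping, pos, bytes, copied,
				  page, fsdata);
	if (unlikely(copied == 0))
		goto again;	/* re-fault user pages, retry this chunk */
	pos += copied;
#endif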
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) unsigned blkbits = i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) unsigned blocksize_mask = (1 << blkbits) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) unsigned long align = offset | iov_iter_alignment(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) struct block_device *bdev = inode->i_sb->s_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (align & blocksize_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) blkbits = blksize_bits(bdev_logical_block_size(bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) blocksize_mask = (1 << blkbits) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) if (align & blocksize_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) }
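/*
 * Worked example (hypothetical numbers) for check_direct_IO(): with
 * 4KiB filesystem blocks, blkbits = 12 and blocksize_mask = 0xfff, so
 * offset = 0x1200 is misaligned (0x1200 & 0xfff = 0x200). If the
 * device's logical block size is 512 bytes, the fallback mask is 0x1ff
 * and 0x1200 & 0x1ff = 0, so the function returns 1 and f2fs_direct_IO()
 * returns 0 to fall back to buffered I/O instead of failing with -EINVAL.
 */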
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) static void f2fs_dio_end_io(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) struct f2fs_private_dio *dio = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) dec_page_count(F2FS_I_SB(dio->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) bio->bi_private = dio->orig_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) bio->bi_end_io = dio->orig_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) kfree(dio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) loff_t file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) struct f2fs_private_dio *dio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) bool write = (bio_op(bio) == REQ_OP_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) dio = f2fs_kzalloc(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) sizeof(struct f2fs_private_dio), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) if (!dio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) dio->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) dio->orig_end_io = bio->bi_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) dio->orig_private = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) dio->write = write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) bio->bi_end_io = f2fs_dio_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) bio->bi_private = dio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) inc_page_count(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) submit_bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) struct address_space *mapping = iocb->ki_filp->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) loff_t offset = iocb->ki_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) int rw = iov_iter_rw(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) enum rw_hint hint = iocb->ki_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) int whint_mode = F2FS_OPTION(sbi).whint_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) bool do_opu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) err = check_direct_IO(inode, iter, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) return err < 0 ? err : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) if (f2fs_force_buffered_io(inode, iocb, iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) do_opu = allow_outplace_dio(inode, iocb, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) trace_f2fs_direct_IO_enter(inode, offset, count, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) if (trace_android_fs_dataread_start_enabled() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) (rw == READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) path = android_fstrace_get_pathname(pathbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) MAX_TRACE_PATHBUF_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) trace_android_fs_dataread_start(inode, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) count, current->pid, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) if (trace_android_fs_datawrite_start_enabled() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) (rw == WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) path = android_fstrace_get_pathname(pathbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) MAX_TRACE_PATHBUF_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) trace_android_fs_datawrite_start(inode, offset, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) current->pid, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) iocb->ki_hint = WRITE_LIFE_NOT_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (iocb->ki_flags & IOCB_NOWAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) iocb->ki_hint = hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) f2fs_up_read(&fi->i_gc_rwsem[rw]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) iocb->ki_hint = hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) f2fs_down_read(&fi->i_gc_rwsem[rw]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (do_opu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) f2fs_down_read(&fi->i_gc_rwsem[READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) iter, rw == WRITE ? get_data_block_dio_write :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) get_data_block_dio, NULL, f2fs_dio_submit_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) DIO_SKIP_HOLES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (do_opu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) f2fs_up_read(&fi->i_gc_rwsem[READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) f2fs_up_read(&fi->i_gc_rwsem[rw]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (rw == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) if (whint_mode == WHINT_MODE_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) iocb->ki_hint = hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) if (!do_opu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) set_inode_flag(inode, FI_UPDATE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) } else if (err == -EIOCBQUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) count - iov_iter_count(iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) } else if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) f2fs_write_failed(mapping, offset + count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) else if (err == -EIOCBQUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) count - iov_iter_count(iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) if (trace_android_fs_dataread_start_enabled() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) (rw == READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) trace_android_fs_dataread_end(inode, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) if (trace_android_fs_datawrite_start_enabled() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) (rw == WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) trace_android_fs_datawrite_end(inode, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) }
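/*
 * A minimal sketch (not compiled) of the IOCB_NOWAIT rule applied in
 * f2fs_direct_IO() above: a no-wait request may only take locks via
 * trylock and must bail out with -EAGAIN instead of sleeping, so the
 * caller can retry from a context where blocking is allowed.
 */
#if 0
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw]))
			return -EAGAIN;			/* never sleep */
	} else {
		f2fs_down_read(&fi->i_gc_rwsem[rw]);	/* may sleep */
	}
#endif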
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) void f2fs_invalidate_page(struct page *page, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) unsigned int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) (offset % PAGE_SIZE || length != PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) if (PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) if (inode->i_ino == F2FS_META_INO(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) dec_page_count(sbi, F2FS_DIRTY_META);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) dec_page_count(sbi, F2FS_DIRTY_NODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) inode_dec_dirty_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) f2fs_remove_dirty_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) clear_page_private_gcing(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) if (test_opt(sbi, COMPRESS_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) if (f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) f2fs_invalidate_compress_pages(sbi, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) clear_page_private_data(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) if (page_private_atomic(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) return f2fs_drop_inmem_page(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) detach_page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) set_page_private(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) int f2fs_release_page(struct page *page, gfp_t wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) /* If this is a dirty page, keep PagePrivate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) if (PageDirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) /* This is an atomically written page; keep it Private */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (page_private_atomic(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) struct f2fs_sb_info *sbi = F2FS_P_SB(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) if (f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) f2fs_invalidate_compress_pages(sbi, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) clear_page_private_data(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) clear_page_private_gcing(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) detach_page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) set_page_private(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) static int f2fs_set_data_page_dirty(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) struct inode *inode = page_file_mapping(page)->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) trace_f2fs_set_page_dirty(page, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) if (PageSwapCache(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) return __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) if (!page_private_atomic(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) f2fs_register_inmem_page(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) * This page has already been registered, so just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) * return here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) if (!PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) __set_page_dirty_nobuffers(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) f2fs_update_dirty_page(inode, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) sector_t start_idx, blknr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) if (dn.data_blkaddr != COMPRESS_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) dn.ofs_in_node += block - start_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) blknr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) if (!__is_valid_data_blkaddr(blknr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) blknr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) return blknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) }
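/*
 * Worked example (hypothetical numbers) for f2fs_bmap_compress(): with
 * i_cluster_size = 4 and block = 6, start_idx = round_down(6, 4) = 4.
 * If the cluster is not compressed (dn.data_blkaddr != COMPRESS_ADDR),
 * ofs_in_node is advanced by block - start_idx = 2 to reach the on-disk
 * address of block 6 within the cluster's dnode.
 */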
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) sector_t blknr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) if (f2fs_has_inline_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) /* make sure all the blocks are allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) filemap_write_and_wait(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) /* the block number must be less than the maximum file block count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) if (unlikely(block >= max_file_blocks(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) if (f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) blknr = f2fs_bmap_compress(inode, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) memset(&map, 0, sizeof(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) map.m_lblk = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) map.m_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) blknr = map.m_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) trace_f2fs_bmap(inode, block, blknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) return blknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) #ifdef CONFIG_MIGRATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) #include <linux/migrate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) int f2fs_migrate_page(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) struct page *newpage, struct page *page, enum migrate_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) int rc, extra_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) struct f2fs_inode_info *fi = F2FS_I(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) bool atomic_written = page_private_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) BUG_ON(PageWriteback(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) /* migrating an atomic written page is safe with the inmem_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) if (atomic_written) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (mode != MIGRATE_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (!mutex_trylock(&fi->inmem_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) /* one extra reference is held for an atomic_write page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) extra_count = atomic_written ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) rc = migrate_page_move_mapping(mapping, newpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) page, extra_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (rc != MIGRATEPAGE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) if (atomic_written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) mutex_unlock(&fi->inmem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (atomic_written) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) struct inmem_pages *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) list_for_each_entry(cur, &fi->inmem_pages, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) if (cur->page == page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) cur->page = newpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) mutex_unlock(&fi->inmem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) get_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) /* guarantee the new page starts with no stale private field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) set_page_private(newpage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) if (PagePrivate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) set_page_private(newpage, page_private(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) SetPagePrivate(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) get_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) set_page_private(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) ClearPagePrivate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (mode != MIGRATE_SYNC_NO_COPY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) migrate_page_copy(newpage, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) migrate_page_states(newpage, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) return MIGRATEPAGE_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) #ifdef CONFIG_SWAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) unsigned int blkcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) unsigned int blkofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) unsigned int secidx = start_blk / blk_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) unsigned int end_sec = secidx + blkcnt / blk_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) set_inode_flag(inode, FI_ALIGNED_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) for (; secidx < end_sec; secidx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) f2fs_down_write(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) set_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) unsigned int blkidx = secidx * blk_per_sec + blkofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) page = f2fs_get_lock_data_page(inode, blkidx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) f2fs_up_write(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) ret = PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) clear_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) ret = filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) f2fs_up_write(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) clear_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) clear_inode_flag(inode, FI_ALIGNED_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
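/*
 * Worked example (hypothetical numbers) for f2fs_migrate_blocks(): with
 * blk_per_sec = 512, start_blk = 1024 and blkcnt = 1024 give
 * secidx = 1024 / 512 = 2 and end_sec = 2 + 1024 / 512 = 4, so sections
 * 2 and 3 are each redirtied page by page and flushed into a freshly
 * allocated pinned section.
 */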
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) static int check_swap_activate(struct swap_info_struct *sis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) struct file *swap_file, sector_t *span)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) struct address_space *mapping = swap_file->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) sector_t cur_lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) sector_t last_lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) sector_t pblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) sector_t lowest_pblock = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) sector_t highest_pblock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) int nr_extents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) unsigned long nr_pblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) unsigned int not_aligned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) * Map all the blocks into the extent list. This code doesn't try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) * to be very smart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) cur_lblock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) last_lblock = bytes_to_blks(inode, i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) while (cur_lblock < last_lblock && cur_lblock < sis->max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) memset(&map, 0, sizeof(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) map.m_lblk = cur_lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) map.m_len = last_lblock - cur_lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) map.m_next_extent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) map.m_may_create = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) /* hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) if (!(map.m_flags & F2FS_MAP_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) f2fs_err(sbi, "Swapfile has holes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) pblock = map.m_pblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) nr_pblocks = map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) nr_pblocks & sec_blks_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) not_aligned++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) nr_pblocks = roundup(nr_pblocks, blks_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) if (cur_lblock + nr_pblocks > sis->max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) nr_pblocks -= blks_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) if (!nr_pblocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) /* this extent is the last one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) nr_pblocks = map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) ret = f2fs_migrate_blocks(inode, cur_lblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) nr_pblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) if (cur_lblock + nr_pblocks >= sis->max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) nr_pblocks = sis->max - cur_lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) if (cur_lblock) { /* exclude the header page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) if (pblock < lowest_pblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) lowest_pblock = pblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) if (pblock + nr_pblocks - 1 > highest_pblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) highest_pblock = pblock + nr_pblocks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) nr_extents += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) cur_lblock += nr_pblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) }
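	/*
	 * cur_lblock now counts every mapped block including the swap
	 * header page; sis->pages and sis->highest_bit exclude the header.
	 * A zero count is bumped to 1 so the generic swapon code reports
	 * an empty swapfile rather than a bogus zero-sized one.
	 */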
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	if (not_aligned)
		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: recreate it with 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
	return ret;
}

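/*
 * ->swap_activate, called by the VM during swapon(2).  Only regular,
 * non-inline, non-compressed files on a writable, non-LFS f2fs instance
 * can back swap.  On success the inode is pinned (FI_PIN_FILE) so its
 * blocks stay put while the VM does I/O to them directly.
 */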
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

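/* ->swap_deactivate, called during swapoff(2): drop the pin again. */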
static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

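/*
 * Address space operations for regular-file data.  Node and meta inodes
 * have their own tables elsewhere in f2fs.
 */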
const struct address_space_operations f2fs_dblock_aops = {
	.readpage = f2fs_read_data_page,
	.readahead = f2fs_readahead,
	.writepage = f2fs_write_data_page,
	.writepages = f2fs_write_data_pages,
	.write_begin = f2fs_write_begin,
	.write_end = f2fs_write_end,
	.set_page_dirty = f2fs_set_data_page_dirty,
	.invalidatepage = f2fs_invalidate_page,
	.releasepage = f2fs_release_page,
	.direct_IO = f2fs_direct_IO,
	.bmap = f2fs_bmap,
	.swap_activate = f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage = f2fs_migrate_page,
#endif
};

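/*
 * Clear only the PAGECACHE_TAG_DIRTY xarray tag for @page, leaving the
 * page's own dirty bit alone, so writeback lookups no longer find it.
 * f2fs uses this when a dirty page is dropped or written out of band.
 */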
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

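/*
 * Post-read processing (decryption, fs-verity verification, and
 * decompression) needs a bio_post_read_ctx per bio.  The contexts come
 * from a slab cache backed by a mempool of NUM_PREALLOC_POST_READ_CTXS
 * entries, so reads keep making progress under memory pressure.
 */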
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
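
/*
 * Rough usage sketch: the two routines above are expected to be paired
 * in the module init/exit path (in upstream f2fs that is fs/f2fs/super.c;
 * the exact call sites may differ in this tree):
 *
 *	err = f2fs_init_post_read_processing();
 *	if (err)
 *		goto free_earlier_caches;	// hypothetical error label
 *	...
 *	f2fs_destroy_post_read_processing();
 */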
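/*
 * Per-superblock workqueue that runs the post-read work items.  It is
 * only allocated when a feature that needs post-processing (encryption,
 * verity, or compression) is enabled on this filesystem.
 */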
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
	    !f2fs_sb_has_verity(sbi) &&
	    !f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						WQ_UNBOUND | WQ_HIGHPRI,
						num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

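/*
 * bio_entry_slab backs the struct bio_entry objects that keep cached,
 * not-yet-submitted write bios on per-temperature lists so later pages
 * can be merged into them (see add_bio_entry()/del_bio_entry() earlier
 * in this file).
 */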
int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
					sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}