^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * f2fs compress support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2019 Chao Yu <chao@kernel.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/f2fs_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/lzo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/lz4.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/zstd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/pagevec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "f2fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "node.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "segment.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <trace/events/f2fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) static struct kmem_cache *cic_entry_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) static struct kmem_cache *dic_entry_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static void *page_array_alloc(struct inode *inode, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) unsigned int size = sizeof(struct page *) * nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) if (likely(size <= sbi->page_array_slab_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) return f2fs_kzalloc(sbi, size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) static void page_array_free(struct inode *inode, void *pages, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) unsigned int size = sizeof(struct page *) * nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) if (likely(size <= sbi->page_array_slab_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) kmem_cache_free(sbi->page_array_slab, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct f2fs_compress_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) int (*init_compress_ctx)(struct compress_ctx *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) void (*destroy_compress_ctx)(struct compress_ctx *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int (*compress_pages)(struct compress_ctx *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) int (*decompress_pages)(struct decompress_io_ctx *dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return index & (cc->cluster_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return index >> cc->log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return cc->cluster_idx << cc->log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) bool f2fs_is_compressed_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (!PagePrivate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) if (!page_private(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (page_private_nonpointer(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) f2fs_bug_on(F2FS_M_SB(page->mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static void f2fs_set_compressed_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct inode *inode, pgoff_t index, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) attach_page_private(page, (void *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* i_crypto_info and iv index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) page->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) page->mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (!cc->rpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (unlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) unlock_page(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) put_page(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static void f2fs_put_rpages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) f2fs_drop_rpages(cc, cc->cluster_size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) f2fs_drop_rpages(cc, len, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct writeback_control *wbc, bool redirty, int unlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) if (!cc->rpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (redirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) redirty_page_for_writepage(wbc, cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) f2fs_put_page(cc->rpages[i], unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct page *f2fs_compress_control_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) return ((struct compress_io_ctx *)page_private(page))->rpages[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) int f2fs_init_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (cc->rpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) return cc->rpages ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) page_array_free(cc->inode, cc->rpages, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) cc->rpages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) cc->nr_rpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) cc->nr_cpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (!reuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) cc->cluster_idx = NULL_CLUSTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) unsigned int cluster_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) if (!f2fs_cluster_can_merge_page(cc, page->index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) cluster_ofs = offset_in_cluster(cc, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) cc->rpages[cluster_ofs] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) cc->nr_rpages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) cc->cluster_idx = cluster_idx(cc, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #ifdef CONFIG_F2FS_FS_LZO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static int lzo_init_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) LZO1X_MEM_COMPRESS, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (!cc->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) kvfree(cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) cc->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static int lzo_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) &cc->clen, cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (ret != LZO_E_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static int lzo_decompress_pages(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) dic->rbuf, &dic->rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (ret != LZO_E_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) "expected:%lu\n", KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) F2FS_I_SB(dic->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) dic->rlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) PAGE_SIZE << dic->log_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static const struct f2fs_compress_ops f2fs_lzo_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) .init_compress_ctx = lzo_init_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) .destroy_compress_ctx = lzo_destroy_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) .compress_pages = lzo_compress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) .decompress_pages = lzo_decompress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) #ifdef CONFIG_F2FS_FS_LZ4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static int lz4_init_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) unsigned int size = LZ4_MEM_COMPRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) #ifdef CONFIG_F2FS_FS_LZ4HC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) size = LZ4HC_MEM_COMPRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (!cc->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * we do not change cc->clen to LZ4_compressBound(inputsize) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * adapt worst compress case, because lz4 compressor can handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * output budget properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) kvfree(cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) cc->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) #ifdef CONFIG_F2FS_FS_LZ4HC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static int lz4hc_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) COMPRESS_LEVEL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) if (level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) cc->clen, level, cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) cc->clen, cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) cc->clen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) static int lz4_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) #ifdef CONFIG_F2FS_FS_LZ4HC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return lz4hc_compress_pages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) cc->clen, cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) cc->clen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) static int lz4_decompress_pages(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) dic->clen, dic->rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (ret != PAGE_SIZE << dic->log_cluster_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) "expected:%lu\n", KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) F2FS_I_SB(dic->inode)->sb->s_id, ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) PAGE_SIZE << dic->log_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) static const struct f2fs_compress_ops f2fs_lz4_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) .init_compress_ctx = lz4_init_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) .destroy_compress_ctx = lz4_destroy_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) .compress_pages = lz4_compress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) .decompress_pages = lz4_decompress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) #ifdef CONFIG_F2FS_FS_ZSTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) #define F2FS_ZSTD_DEFAULT_CLEVEL 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static int zstd_init_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) ZSTD_parameters params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) ZSTD_CStream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) void *workspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) unsigned int workspace_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) COMPRESS_LEVEL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) if (!level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) level = F2FS_ZSTD_DEFAULT_CLEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) params = ZSTD_getParams(level, cc->rlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) workspace_size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (!workspace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (!stream) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) kvfree(workspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) cc->private = workspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) cc->private2 = stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) kvfree(cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) cc->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) cc->private2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static int zstd_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) ZSTD_CStream *stream = cc->private2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) ZSTD_inBuffer inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) ZSTD_outBuffer outbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) int src_size = cc->rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) inbuf.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) inbuf.src = cc->rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) inbuf.size = src_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) outbuf.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) outbuf.dst = cc->cbuf->cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) outbuf.size = dst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (ZSTD_isError(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) __func__, ZSTD_getErrorCode(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) ret = ZSTD_endStream(stream, &outbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (ZSTD_isError(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) __func__, ZSTD_getErrorCode(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * there is compressed data remained in intermediate buffer due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * no more space in cbuf.cdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) cc->clen = outbuf.pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) ZSTD_DStream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) void *workspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) unsigned int workspace_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) unsigned int max_window_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) workspace_size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (!workspace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (!stream) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) kvfree(workspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) dic->private = workspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) dic->private2 = stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) kvfree(dic->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) dic->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) dic->private2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static int zstd_decompress_pages(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ZSTD_DStream *stream = dic->private2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) ZSTD_inBuffer inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ZSTD_outBuffer outbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) inbuf.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) inbuf.src = dic->cbuf->cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) inbuf.size = dic->clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) outbuf.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) outbuf.dst = dic->rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) outbuf.size = dic->rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (ZSTD_isError(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) __func__, ZSTD_getErrorCode(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (dic->rlen != outbuf.pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) "expected:%lu\n", KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) F2FS_I_SB(dic->inode)->sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) __func__, dic->rlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) PAGE_SIZE << dic->log_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) static const struct f2fs_compress_ops f2fs_zstd_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) .init_compress_ctx = zstd_init_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) .destroy_compress_ctx = zstd_destroy_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) .compress_pages = zstd_compress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) .init_decompress_ctx = zstd_init_decompress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) .decompress_pages = zstd_decompress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) #ifdef CONFIG_F2FS_FS_LZO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) #ifdef CONFIG_F2FS_FS_LZORLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) static int lzorle_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) &cc->clen, cc->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (ret != LZO_E_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static const struct f2fs_compress_ops f2fs_lzorle_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) .init_compress_ctx = lzo_init_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) .destroy_compress_ctx = lzo_destroy_compress_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) .compress_pages = lzorle_compress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) .decompress_pages = lzo_decompress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) #ifdef CONFIG_F2FS_FS_LZO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) &f2fs_lzo_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) #ifdef CONFIG_F2FS_FS_LZ4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) &f2fs_lz4_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) #ifdef CONFIG_F2FS_FS_ZSTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) &f2fs_zstd_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) &f2fs_lzorle_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) bool f2fs_is_compress_backend_ready(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) static mempool_t *compress_page_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static int num_compress_pages = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) module_param(num_compress_pages, uint, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) MODULE_PARM_DESC(num_compress_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) "Number of intermediate compress pages to preallocate");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) int f2fs_init_compress_mempool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (!compress_page_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) void f2fs_destroy_compress_mempool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) mempool_destroy(compress_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static struct page *f2fs_compress_alloc_page(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) page = mempool_alloc(compress_page_pool, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) static void f2fs_compress_free_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) detach_page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) page->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) mempool_free(page, compress_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) #define MAX_VMAP_RETRIES 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static void *f2fs_vmap(struct page **pages, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) for (i = 0; i < MAX_VMAP_RETRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) buf = vm_map_ram(pages, count, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) vm_unmap_aliases();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static int f2fs_compress_pages(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct f2fs_inode_info *fi = F2FS_I(cc->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) const struct f2fs_compress_ops *cops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) f2fs_cops[fi->i_compress_algorithm];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) unsigned int max_len, new_nr_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct page **new_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) u32 chksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) cc->cluster_size, fi->i_compress_algorithm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (cops->init_compress_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ret = cops->init_compress_ctx(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) max_len = COMPRESS_HEADER_SIZE + cc->clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (!cc->cpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) goto destroy_compress_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) cc->cpages[i] = f2fs_compress_alloc_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (!cc->cpages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) goto out_free_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (!cc->rbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) goto out_free_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (!cc->cbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) goto out_vunmap_rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) ret = cops->compress_pages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) goto out_vunmap_cbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (cc->clen > max_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) goto out_vunmap_cbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) cc->cbuf->clen = cpu_to_le32(cc->clen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) cc->cbuf->cdata, cc->clen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) cc->cbuf->chksum = cpu_to_le32(chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) cc->cbuf->reserved[i] = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* Now we're going to cut unnecessary tail pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (!new_cpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) goto out_vunmap_cbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* zero out any unused part of the last page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) memset(&cc->cbuf->cdata[cc->clen], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) (new_nr_cpages * PAGE_SIZE) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) (cc->clen + COMPRESS_HEADER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) vm_unmap_ram(cc->cbuf, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) vm_unmap_ram(cc->rbuf, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (i < new_nr_cpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) new_cpages[i] = cc->cpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) f2fs_compress_free_page(cc->cpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) cc->cpages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (cops->destroy_compress_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) cops->destroy_compress_ctx(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) cc->cpages = new_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) cc->nr_cpages = new_nr_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) cc->clen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) out_vunmap_cbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) vm_unmap_ram(cc->cbuf, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) out_vunmap_rbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) vm_unmap_ram(cc->rbuf, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) out_free_cpages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (cc->cpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) f2fs_compress_free_page(cc->cpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) cc->cpages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) destroy_compress_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (cops->destroy_compress_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) cops->destroy_compress_ctx(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) cc->clen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct f2fs_inode_info *fi = F2FS_I(dic->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) const struct f2fs_compress_ops *cops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) f2fs_cops[fi->i_compress_algorithm];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) dic->cluster_size, fi->i_compress_algorithm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (dic->failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) goto out_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (!dic->tpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) goto out_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) for (i = 0; i < dic->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (dic->rpages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) dic->tpages[i] = dic->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) dic->tpages[i] = f2fs_compress_alloc_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!dic->tpages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) goto out_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (cops->init_decompress_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret = cops->init_decompress_ctx(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto out_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (!dic->rbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto out_destroy_decompress_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!dic->cbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) goto out_vunmap_rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) dic->clen = le32_to_cpu(dic->cbuf->clen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) dic->rlen = PAGE_SIZE << dic->log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) goto out_vunmap_cbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ret = cops->decompress_pages(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) u32 provided = le32_to_cpu(dic->cbuf->chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (provided != calculated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) printk_ratelimited(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) provided, calculated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) out_vunmap_cbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) vm_unmap_ram(dic->cbuf, dic->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) out_vunmap_rbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) vm_unmap_ram(dic->rbuf, dic->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) out_destroy_decompress_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (cops->destroy_decompress_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cops->destroy_decompress_ctx(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) out_end_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dic->clen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) f2fs_decompress_end_io(dic, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * This is called when a page of a compressed cluster has been read from disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * (or failed to be read from disk). It checks whether this page was the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * page being waited on in the cluster, and if so, it decompresses the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * (or in the case of a failure, cleans up without actually decompressing).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) void f2fs_end_read_compressed_page(struct page *page, bool failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) block_t blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct decompress_io_ctx *dic =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) (struct decompress_io_ctx *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dec_page_count(sbi, F2FS_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (failed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) WRITE_ONCE(dic->failed, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) else if (blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) f2fs_cache_compressed_page(sbi, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dic->inode->i_ino, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (atomic_dec_and_test(&dic->remaining_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) f2fs_decompress_cluster(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (cc->cluster_idx == NULL_CLUSTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return cc->cluster_idx == cluster_idx(cc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) bool f2fs_cluster_is_empty(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return cc->nr_rpages == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static bool f2fs_cluster_is_full(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return cc->cluster_size == cc->nr_rpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (f2fs_cluster_is_empty(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return is_page_in_cluster(cc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static bool cluster_has_invalid_data(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) loff_t i_size = i_size_read(cc->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct page *page = cc->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* beyond EOF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (page->index >= nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static int __f2fs_cluster_blocks(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned int cluster_idx, bool compr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) unsigned int start_idx = cluster_idx <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) F2FS_I(inode)->i_log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (dn.data_blkaddr == COMPRESS_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) for (i = 1; i < cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) blkaddr = data_blkaddr(dn.inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dn.node_page, dn.ofs_in_node + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (compr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (blkaddr != NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) f2fs_bug_on(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) !compr && ret != cluster_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* return # of compressed blocks in compressed cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int f2fs_compressed_blocks(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* return # of valid blocks in compressed cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return __f2fs_cluster_blocks(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) index >> F2FS_I(inode)->i_log_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static bool cluster_may_compress(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (!f2fs_need_compress_data(cc->inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (f2fs_is_atomic_file(cc->inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (!f2fs_cluster_is_full(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return !cluster_has_invalid_data(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void set_cluster_writeback(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (cc->rpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) set_page_writeback(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static void set_cluster_dirty(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) for (i = 0; i < cc->cluster_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (cc->rpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) set_page_dirty(cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static int prepare_compress_overwrite(struct compress_ctx *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct page **pagep, pgoff_t index, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct address_space *mapping = cc->inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) sector_t last_block_in_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pgoff_t start_idx = start_idx_of_cluster(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = f2fs_init_compress_ctx(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* keep page reference to avoid page reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) page = f2fs_pagecache_get_page(mapping, start_idx + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) fgp_flag, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) goto unlock_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) f2fs_compress_ctx_add_page(cc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!f2fs_cluster_is_empty(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct bio *bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) &last_block_in_bio, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) f2fs_put_rpages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) f2fs_destroy_compress_ctx(cc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) f2fs_submit_bio(sbi, bio, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ret = f2fs_init_compress_ctx(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) f2fs_bug_on(sbi, cc->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) page = find_lock_page(mapping, start_idx + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* page can be truncated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto release_and_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) f2fs_wait_on_page_writeback(page, DATA, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) f2fs_compress_ctx_add_page(cc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) release_and_retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) f2fs_put_rpages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) f2fs_unlock_rpages(cc, i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) f2fs_destroy_compress_ctx(cc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (likely(!ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) *fsdata = cc->rpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *pagep = cc->rpages[offset_in_cluster(cc, index)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return cc->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) unlock_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) f2fs_put_rpages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) f2fs_unlock_rpages(cc, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) f2fs_destroy_compress_ctx(cc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int f2fs_prepare_compress_overwrite(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct page **pagep, pgoff_t index, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct compress_ctx cc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .inode = inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) .cluster_size = F2FS_I(inode)->i_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) .rpages = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) .nr_rpages = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return prepare_compress_overwrite(&cc, pagep, index, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) pgoff_t index, unsigned copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct compress_ctx cc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) .inode = inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .cluster_size = F2FS_I(inode)->i_cluster_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .rpages = fsdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) bool first_index = (index == cc.rpages[0]->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) set_cluster_dirty(&cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) f2fs_put_rpages_wbc(&cc, NULL, false, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) f2fs_destroy_compress_ctx(&cc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return first_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) void *fsdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct page *pagep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) err = f2fs_is_compressed_cluster(inode, start_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* truncate normal cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return f2fs_do_truncate_blocks(inode, from, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* truncate compressed cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err = f2fs_prepare_compress_overwrite(inode, &pagep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) start_idx, &fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* should not be a normal cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) f2fs_bug_on(F2FS_I_SB(inode), err == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct page **rpages = fsdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int cluster_size = F2FS_I(inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) for (i = cluster_size - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) loff_t start = rpages[i]->index << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (from <= start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) zero_user_segment(rpages[i], 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) zero_user_segment(rpages[i], from - start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) f2fs_compress_write_end(inode, fsdata, start_idx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int f2fs_write_compressed_pages(struct compress_ctx *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int *submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct writeback_control *wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) enum iostat_type io_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct inode *inode = cc->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct f2fs_io_info fio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .sbi = sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .ino = cc->inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .type = DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) .op = REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) .op_flags = wbc_to_write_flags(wbc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) .old_blkaddr = NEW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .page = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .encrypted_page = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .compressed_page = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .submitted = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .io_type = io_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) .io_wbc = wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct compress_io_ctx *cic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) pgoff_t start_idx = start_idx_of_cluster(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned int last_index = cc->cluster_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) loff_t psize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* we should bypass data pages to proceed the kworkder jobs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) mapping_set_error(cc->rpages[0]->mapping, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (IS_NOQUOTA(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * We need to wait for node_write to avoid block allocation during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * checkpoint. This can only happen to quota writes which can cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * the below discard race condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) f2fs_down_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) } else if (!f2fs_trylock_op(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) goto out_unlock_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) for (i = 0; i < cc->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (data_blkaddr(dn.inode, dn.node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) dn.ofs_in_node + i) == NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) fio.version = ni.version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!cic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) goto out_put_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) cic->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) atomic_set(&cic->pending_pages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!cic->rpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto out_put_cic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) cic->nr_rpages = cc->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) f2fs_set_compressed_page(cc->cpages[i], inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) cc->rpages[i + 1]->index, cic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) fio.compressed_page = cc->cpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dn.ofs_in_node + i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* wait for GCed page writeback via META_MAPPING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (fio.encrypted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) fio.page = cc->rpages[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) err = f2fs_encrypt_one_page(&fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) goto out_destroy_crypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) cc->cpages[i] = fio.encrypted_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) set_cluster_writeback(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) for (i = 0; i < cc->cluster_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) cic->rpages[i] = cc->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) blkaddr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) fio.page = cc->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) fio.old_blkaddr = blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* cluster header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (blkaddr == COMPRESS_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) fio.compr_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) f2fs_invalidate_blocks(sbi, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto unlock_continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) fio.compr_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (i > cc->nr_cpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (__is_valid_data_blkaddr(blkaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) f2fs_invalidate_blocks(sbi, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) f2fs_update_data_blkaddr(&dn, NEW_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) goto unlock_continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (fio.encrypted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) fio.encrypted_page = cc->cpages[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) fio.compressed_page = cc->cpages[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) cc->cpages[i - 1] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) f2fs_outplace_write_data(&dn, &fio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) (*submitted)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) unlock_continue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) inode_dec_dirty_pages(cc->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) unlock_page(fio.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (fio.compr_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) add_compr_block_stat(inode, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) set_inode_flag(cc->inode, FI_APPEND_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (cc->cluster_idx == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (IS_NOQUOTA(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) f2fs_up_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) spin_lock(&fi->i_size_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (fi->last_disk_size < psize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) fi->last_disk_size = psize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) spin_unlock(&fi->i_size_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) f2fs_put_rpages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) cc->cpages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) f2fs_destroy_compress_ctx(cc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) out_destroy_crypt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) page_array_free(cc->inode, cic->rpages, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) for (--i; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) fscrypt_finalize_bounce_page(&cc->cpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) out_put_cic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) kmem_cache_free(cic_entry_slab, cic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) out_put_dnode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) out_unlock_op:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (IS_NOQUOTA(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) f2fs_up_read(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) for (i = 0; i < cc->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (!cc->cpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) f2fs_compress_free_page(cc->cpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cc->cpages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) cc->cpages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct f2fs_sb_info *sbi = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct compress_io_ctx *cic =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) (struct compress_io_ctx *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (unlikely(bio->bi_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) mapping_set_error(cic->inode->i_mapping, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) f2fs_compress_free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dec_page_count(sbi, F2FS_WB_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (atomic_dec_return(&cic->pending_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) for (i = 0; i < cic->nr_rpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) WARN_ON(!cic->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) clear_page_private_gcing(cic->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) end_page_writeback(cic->rpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) kmem_cache_free(cic_entry_slab, cic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
/*
 * Fall back to writing the cluster's pagecache pages as ordinary
 * (uncompressed) data pages.
 *
 * All pages are first redirtied and unlocked, then re-locked and written
 * one by one through f2fs_write_single_data_page(), passing down the
 * cluster's compressed block count so the lower layer knows the on-disk
 * layout. Returns 0 on success or a negative errno.
 */
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret, i;

	/* number of valid compressed blocks in this cluster, or -errno */
	compr_blocks = f2fs_compressed_blocks(cc);

	/* redirty and unlock everything; pages are re-locked individually below */
	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		/* page may have been truncated/relocated while unlocked */
		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				/* callee redirtied the page; skip it for now */
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode))
					return 0;
				ret = 0;
				/* back off briefly, then retry this page */
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			return ret;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int f2fs_write_multi_pages(struct compress_ctx *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) int *submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct writeback_control *wbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) enum iostat_type io_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) *submitted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (cluster_may_compress(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) err = f2fs_compress_pages(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (err == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) } else if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) f2fs_put_rpages_wbc(cc, wbc, true, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) goto destroy_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) err = f2fs_write_compressed_pages(cc, submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) wbc, io_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) f2fs_put_rpages_wbc(cc, wbc, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) destroy_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) f2fs_destroy_compress_ctx(cc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static void f2fs_free_dic(struct decompress_io_ctx *dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct decompress_io_ctx *dic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) pgoff_t start_idx = start_idx_of_cluster(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (!dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (!dic->rpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) kmem_cache_free(dic_entry_slab, dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) dic->inode = cc->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) atomic_set(&dic->remaining_pages, cc->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dic->cluster_idx = cc->cluster_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) dic->cluster_size = cc->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dic->log_cluster_size = cc->log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dic->nr_cpages = cc->nr_cpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) refcount_set(&dic->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dic->failed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) for (i = 0; i < dic->cluster_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) dic->rpages[i] = cc->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dic->nr_rpages = cc->cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!dic->cpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) for (i = 0; i < dic->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) page = f2fs_compress_alloc_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) f2fs_set_compressed_page(page, cc->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) start_idx + i + 1, dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) dic->cpages[i] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return dic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) f2fs_free_dic(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void f2fs_free_dic(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (dic->tpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) for (i = 0; i < dic->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (dic->rpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (!dic->tpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) f2fs_compress_free_page(dic->tpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) page_array_free(dic->inode, dic->tpages, dic->cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (dic->cpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) for (i = 0; i < dic->nr_cpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!dic->cpages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) f2fs_compress_free_page(dic->cpages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) kmem_cache_free(dic_entry_slab, dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static void f2fs_put_dic(struct decompress_io_ctx *dic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (refcount_dec_and_test(&dic->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) f2fs_free_dic(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * Update and unlock the cluster's pagecache pages, and release the reference to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * the decompress_io_ctx that was being held for I/O completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) for (i = 0; i < dic->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct page *rpage = dic->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!rpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* PG_error was set if verity failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (failed || PageError(rpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ClearPageUptodate(rpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /* will re-read again later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ClearPageError(rpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) SetPageUptodate(rpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unlock_page(rpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) f2fs_put_dic(dic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static void f2fs_verify_cluster(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct decompress_io_ctx *dic =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) container_of(work, struct decompress_io_ctx, verity_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* Verify the cluster's decompressed pages with fs-verity. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) for (i = 0; i < dic->cluster_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct page *rpage = dic->rpages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (rpage && !fsverity_verify_page(rpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) SetPageError(rpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) __f2fs_decompress_end_io(dic, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * This is called when a compressed cluster has been decompressed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * (or failed to be read and/or decompressed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (!failed && dic->need_verity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * Note that to avoid deadlocks, the verity work can't be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * on the decompression workqueue. This is because verifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * the data pages can involve reading metadata pages from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * file, and these metadata pages may be compressed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) fsverity_enqueue_verify_work(&dic->verity_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) __f2fs_decompress_end_io(dic, failed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
/*
 * Release the decompress_io_ctx reference held by a compressed page.
 * Called once the page is no longer needed and may be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	f2fs_put_dic((struct decompress_io_ctx *)page_private(page));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
/* address_space operations for the per-superblock compress cache inode */
const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
/* Return the pagecache mapping of the compress cache inode. */
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!sbi->compress_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
/*
 * Cache a copy of the on-disk compressed page at @blkaddr in the
 * compress cache inode's pagecache, tagged with the owning inode @ino.
 * Best effort: silently returns when caching is disabled, memory is
 * tight, the block address is invalid, or the page is already cached.
 */
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	/* already cached? */
	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	/* re-check validity: the block may have been released meanwhile */
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) block_t blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct page *cpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) bool hitted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (!test_opt(sbi, COMPRESS_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (cpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (PageUptodate(cpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) atomic_inc(&sbi->compress_page_hit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) memcpy(page_address(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) page_address(cpage), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) hitted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) f2fs_put_page(cpage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return hitted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
/*
 * Remove every page cached on behalf of inode @ino from the compress
 * cache, walking the cache mapping in pagevec-sized batches. The cache
 * is indexed by block address, so the walk is bounded by MAX_BLKADDR.
 */
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			/* raced with truncation/another invalidation */
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			/* only drop pages that belong to @ino */
			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (!test_opt(sbi, COMPRESS_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (IS_ERR(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) sbi->compress_inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) sbi->compress_percent = COMPRESS_PERCENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) sbi->compress_watermark = COMPRESS_WATERMARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) atomic_set(&sbi->compress_page_hit, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
/* Drop the compress cache inode reference taken at mount time. */
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) dev_t dev = sbi->sb->s_bdev->bd_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) char slab_name[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) sbi->page_array_slab_size = sizeof(struct page *) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) F2FS_OPTION(sbi).compress_log_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sbi->page_array_slab_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (!sbi->page_array_slab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
/* Tear down the per-superblock page-pointer-array slab. */
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static int __init f2fs_init_cic_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) sizeof(struct compress_io_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!cic_entry_slab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
/* Free the slab backing compress_io_ctx allocations. */
static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static int __init f2fs_init_dic_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) sizeof(struct decompress_io_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (!dic_entry_slab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
/* Free the slab backing decompress_io_ctx allocations. */
static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int __init f2fs_init_compress_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) err = f2fs_init_cic_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) err = f2fs_init_dic_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) goto free_cic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) free_cic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) f2fs_destroy_cic_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
/* Module-exit teardown of both compression context slabs. */
void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}