Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>

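/*
 * Check whether @inode's data can be kept inline in the inode block:
 * only non-atomic regular files or symlinks whose size fits within
 * MAX_INLINE_DATA() and which need no post-read processing qualify.
 */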
bool f2fs_may_inline_data(struct inode *inode)
{
	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
		return false;

	if (f2fs_post_read_required(inode))
		return false;

	return true;
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

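/*
 * Copy the inline data from the inode page @ipage into the page cache
 * page @page, zero the rest of the page and mark it up to date.
 */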
void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
{
	struct inode *inode = page->mapping->host;
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(inode, ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	if (!PageUptodate(page))
		SetPageUptodate(page);
}

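/*
 * Zero the inline data area of @ipage from byte offset @from onwards;
 * truncating to zero also clears the FI_DATA_EXIST flag.
 */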
void f2fs_truncate_inline_inode(struct inode *inode,
					struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA(inode))
		return;

	addr = inline_data_addr(inode, ipage);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
	set_page_dirty(ipage);

	if (from == 0)
		clear_inode_flag(inode, FI_DATA_EXIST);
}

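/*
 * Read path for inline inodes: fill @page from the inline data area.
 * Returns -EAGAIN if the inode no longer carries inline data, so the
 * caller can fall back to the regular read path.
 */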
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	if (trace_android_fs_dataread_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_dataread_start(inode, page_offset(page),
						PAGE_SIZE, current->pid,
						path, current->comm);
	}

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		trace_android_fs_dataread_end(inode, page_offset(page),
					      PAGE_SIZE);
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		trace_android_fs_dataread_end(inode, page_offset(page),
					      PAGE_SIZE);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_SIZE);
	else
		f2fs_do_read_inline_data(page, ipage);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	trace_android_fs_dataread_end(inode, page_offset(page),
				      PAGE_SIZE);
	unlock_page(page);
	return 0;
}

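/*
 * Move the inline data of dn->inode out into a newly reserved data
 * block described by @dn, write it back, and then clear the inline
 * data area and the FI_INLINE_DATA flag.
 */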
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.ino = dn->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
		.io_type = FS_DATA_IO,
	};
	struct node_info ni;
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
	if (err) {
		f2fs_truncate_data_blocks_range(dn, 1);
		f2fs_put_dnode(dn);
		return err;
	}

	fio.version = ni.version;

	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(dn);
		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
		f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dn->inode->i_ino, dn->data_blkaddr);
		return -EFSCORRUPTED;
	}

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	f2fs_do_read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	ClearPageError(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	f2fs_outplace_write_data(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		f2fs_remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_page_private_inline(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

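/*
 * Top-level conversion helper: grab the inode page and the first page
 * cache page of @inode and convert the inline data under f2fs_lock_op().
 */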
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode) ||
			f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
		return 0;

	err = dquot_initialize(inode);
	if (err)
		return err;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	if (!err)
		f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

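/*
 * Write path for inline inodes: copy @page back into the inline data
 * area of the inode page.  Returns -EAGAIN if the inline data has
 * already been converted, so the caller retries the normal write path.
 */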
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(inode, dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	f2fs_clear_page_cache_dirty_tag(page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_page_private_inline(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}

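/*
 * Recover inline data from the node page @npage during roll-forward
 * recovery.  Returns 1 if inline data was recovered into the inode
 * page, 0 otherwise, or a negative errno on failure.
 */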
int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove data blocks, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);

		f2fs_wait_on_page_writeback(ipage, NODE, true, true);

		src_addr = inline_data_addr(inode, npage);
		dst_addr = inline_data_addr(inode, ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));

		set_inode_flag(inode, FI_INLINE_DATA);
		set_inode_flag(inode, FI_DATA_EXIST);

		set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		return 1;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		f2fs_truncate_inline_inode(inode, ipage, 0);
		stat_dec_inline_inode(inode);
		clear_inode_flag(inode, FI_INLINE_DATA);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		int ret;

		ret = f2fs_truncate_blocks(inode, 0, false);
		if (ret)
			return ret;
		stat_inc_inline_inode(inode);
		goto process_inline;
	}
	return 0;
}

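/*
 * Look up @fname in an inline directory.  On a hit the matching dentry
 * is returned and *res_page points to the referenced inode page; on
 * failure an ERR_PTR is stored in *res_page and NULL is returned.
 */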
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					const struct f2fs_filename *fname,
					struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	void *inline_dentry;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		*res_page = ipage;
		return NULL;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	make_dentry_ptr_inline(dir, &d, inline_dentry);
	de = f2fs_find_target_dentry(&d, fname, NULL);
	unlock_page(ipage);
	if (IS_ERR(de)) {
		*res_page = ERR_CAST(de);
		de = NULL;
	}
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	return de;
}

int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);
	f2fs_do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA(inode))
		f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
	return 0;
}

/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr src, dst;
	int err;

	page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(&dn);
		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
		f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dir->i_ino, dn.data_blkaddr);
		err = -EFSCORRUPTED;
		goto out;
	}

	f2fs_wait_on_page_writeback(page, DATA, true, true);

	dentry_blk = page_address(page);

	make_dentry_ptr_inline(dir, &src, inline_dentry);
	make_dentry_ptr_block(dir, &dst, dentry_blk);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
	memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
	/*
	 * We do not need to zero out the remainder of the dentry and
	 * filename fields, since the bitmap already marks which slots are
	 * in use.  Likewise, copying/zeroing the reserved space of the
	 * dentry block can be skipped, because it has not been used so far.
	 */
	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	f2fs_truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
			!f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}

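/*
 * Re-insert every in-use entry from the backed-up inline dentry area
 * @inline_dentry into regular dentry blocks of @dir; on failure the
 * newly added dentry pages are truncated away.
 */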
static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr_inline(dir, &d, inline_dentry);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct f2fs_filename fname;
		nid_t ino;
		umode_t fake_mode;

		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];

		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		/*
		 * We only need the disk_name and hash to move the dentry.
		 * We don't need the original or casefolded filenames.
		 */
		memset(&fname, 0, sizeof(fname));
		fname.disk_name.name = d.filename[bit_pos];
		fname.disk_name.len = le16_to_cpu(de->name_len);
		fname.hash = de->hash_code;

		ino = le32_to_cpu(de->ino);
		fake_mode = f2fs_get_de_type(de) << S_SHIFT;

		err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	truncate_inode_pages(&dir->i_data, 0);
	f2fs_truncate_blocks(dir, 0, false);
	f2fs_remove_dirty_inode(dir);
	return err;
}

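/*
 * Used when the directory already has a non-zero i_dir_level: back up
 * the inline dentries, clear the inline area, and re-add each entry so
 * it is rehashed into the proper regular dentry block.
 */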
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	void *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
				MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
	f2fs_truncate_inline_inode(dir, ipage, 0);

	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
			!f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	kfree(backup_dentry);
	return 0;
recover:
	lock_page(ipage);
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}

static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	if (!F2FS_I(dir)->i_dir_level)
		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
	else
		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}

int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_filename fname;
	void *inline_dentry = NULL;
	int err = 0;

	if (!f2fs_has_inline_dentry(dir))
		return 0;

	f2fs_lock_op(sbi);

	err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto out;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_fname;
	}

	if (f2fs_has_enough_room(dir, ipage, &fname)) {
		f2fs_put_page(ipage, 1);
		goto out_fname;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	err = do_convert_inline_dir(dir, ipage, inline_dentry);
	if (!err)
		f2fs_put_page(ipage, 1);
out_fname:
	f2fs_free_filename(&fname);
out:
	f2fs_unlock_op(sbi);
	return err;
}

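/*
 * Add @fname for @inode to the inline dentry area of @dir.  If there is
 * no free slot, convert the directory to regular form and return
 * -EAGAIN so the caller retries via the regular dentry path.
 */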
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			  struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	void *inline_dentry = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
	struct page *page = NULL;
	int err = 0;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
	if (bit_pos >= d.max) {
		err = do_convert_inline_dir(dir, ipage, inline_dentry);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		f2fs_down_write(&F2FS_I(inode)->i_sem);
		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);

	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
			   bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);

		/* synchronize inode page's data from inode cache */
		if (is_inode_flag_set(inode, FI_NEW_INODE))
			f2fs_update_inode(inode, page);

		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		f2fs_up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}

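/*
 * Remove @dentry from the inline dentry area of @dir by clearing its
 * bitmap slots, update the directory timestamps, and drop the victim
 * inode's link count when one is given.
 */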
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE, true, true);

	inline_dentry = inline_data_addr(dir, page);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = dentry - d.dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, d.bitmap);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	dir->i_ctime = dir->i_mtime = current_time(dir);
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	void *inline_dentry;
	struct f2fs_dentry_ptr d;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < d.max)
		return false;

	return true;
}

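/*
 * readdir for inline directories: emit every inline dentry in a single
 * pass and advance ctx->pos to d.max once all entries have been filled.
 */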
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct fscrypt_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;
	void *inline_dentry = NULL;
	int err;

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	if (ctx->pos == d.max)
		return 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);
	/*
	 * f2fs_readdir() is protected by inode->i_rwsem, so it is safe to
	 * access ipage without holding the page lock.
	 */
	unlock_page(ipage);

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
	if (!err)
		ctx->pos = d.max;

	f2fs_put_page(ipage, 0);
	return err < 0 ? err : 0;
}

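/*
 * FIEMAP for inline inodes: report the inline data area as a single
 * DATA_INLINE | NOT_ALIGNED | LAST extent located inside the inode block.
 */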
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
				!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
		err = -EAGAIN;
		goto out;
	}

	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
	if (err)
		goto out;

	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(inode, ipage) -
					(char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
	trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
out:
	f2fs_put_page(ipage, 1);
	return err;
}