Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
#include "compr.h"

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
			       struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);

/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *ret;
	struct list_head *nextlist = NULL;
	int n = jiffies % 128;

	/* Pick an eraseblock to garbage collect next. This is where we'll
	   put the clever wear-levelling algorithms. Eventually.  */
	/* We possibly want to favour the dirtier blocks more when the
	   number of free blocks is low. */
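	/* Note: 'n = jiffies % 128' above is a cheap pseudo-random draw.
	   With the thresholds below, and assuming no bad blocks are pending
	   and every list is populated, the odds are roughly 50/128 for
	   erasable_list, 60/128 for very_dirty_list, 16/128 for dirty_list
	   and 2/128 for clean_list. */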
again:
	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
		jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
		nextlist = &c->bad_used_list;
	} else if (n < 50 && !list_empty(&c->erasable_list)) {
		/* Note that most of them will have gone directly to be erased.
		   So don't favour the erasable_list _too_ much. */
		jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
		nextlist = &c->erasable_list;
	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
		/* Most of the time, pick one off the very_dirty list */
		jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
		nextlist = &c->very_dirty_list;
	} else if (n < 126 && !list_empty(&c->dirty_list)) {
		jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->clean_list)) {
		jffs2_dbg(1, "Picking block from clean_list to GC next\n");
		nextlist = &c->clean_list;
	} else if (!list_empty(&c->dirty_list)) {
		jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");

		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->very_dirty_list)) {
		jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
		nextlist = &c->very_dirty_list;
	} else if (!list_empty(&c->erasable_list)) {
		jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");

		nextlist = &c->erasable_list;
	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
		/* There are blocks waiting for the wbuf sync */
		jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
		spin_unlock(&c->erase_completion_lock);
		jffs2_flush_wbuf_pad(c);
		spin_lock(&c->erase_completion_lock);
		goto again;
	} else {
		/* Eep. All were empty */
		jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
		return NULL;
	}

	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
	list_del(&ret->list);
	c->gcblock = ret;
	ret->gc_node = ret->first_node;
	if (!ret->gc_node) {
		pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
			ret->offset);
		BUG();
	}

	/* Have we accidentally picked a clean block with wasted space ? */
	if (ret->wasted_size) {
		jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
			  ret->wasted_size);
		ret->dirty_size += ret->wasted_size;
		c->wasted_size -= ret->wasted_size;
		c->dirty_size += ret->wasted_size;
		ret->wasted_size = 0;
	}

	return ret;
}

/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 */
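/* Return-value sketch, as implemented below: 0 on progress (or a benign
 * retry), -EINTR if interrupted while taking alloc_sem, -EAGAIN if no GC
 * block was found but erases are still in flight, -EIO if there is nothing
 * at all to GC, -ENOSPC on an accounting inconsistency or a failed node GC. */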
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	uint32_t gcblock_dirty;
	int ret = 0, inum, nlink;
	int xattr = 0;

	if (mutex_lock_interruptible(&c->alloc_sem))
		return -EINTR;


	for (;;) {
		/* We can't start doing GC until we've finished checking
		   the node CRCs etc. */
		int bucket, want_ino;

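		/* Note: once unchecked_size hits zero we break out of this
		   loop below with erase_completion_lock still held. */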
		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;
		spin_unlock(&c->erase_completion_lock);

		if (!xattr)
			xattr = jffs2_verify_xattr(c);

		spin_lock(&c->inocache_lock);
		/* Instead of doing the inodes in numeric order, doing a lookup
		 * in the hash for each possible number, just walk the hash
		 * buckets of *existing* inodes. This means that we process
		 * them out-of-order, but it can be a lot faster if there's
		 * a sparse inode# space. Which there often is. */
		want_ino = c->check_ino;
		for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
			for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
				if (ic->ino < want_ino)
					continue;

				if (ic->state != INO_STATE_CHECKEDABSENT &&
				    ic->state != INO_STATE_PRESENT)
					goto got_next; /* with inocache_lock held */

				jffs2_dbg(1, "Skipping ino #%u already checked\n",
					  ic->ino);
			}
			want_ino = 0;
		}

		/* Point c->check_ino past the end of the last bucket. */
		c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
				~c->inocache_hashsize) - 1;

		spin_unlock(&c->inocache_lock);

		pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
			c->unchecked_size);
		jffs2_dbg_dump_block_lists_nolock(c);
		mutex_unlock(&c->alloc_sem);
		return -ENOSPC;

	got_next:
		/* For next time round the loop, we want c->check_ino to indicate
		 * the *next* one we want to check. And since we're walking the
		 * buckets rather than doing it sequentially, it's: */
		c->check_ino = ic->ino + c->inocache_hashsize;

		if (!ic->pino_nlink) {
			jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
				  ic->ino);
			spin_unlock(&c->inocache_lock);
			jffs2_xattr_delete_inode(c, ic);
			continue;
		}
		switch(ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			pr_warn("Inode #%u is in state %d during CRC check phase!\n",
				ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet
			   finished checking all its nodes */
			jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
				  ic->ino);
			/* We need to come back again for the _same_ inode. We've
			 made no progress in this case, but that should be OK */
			c->check_ino = ic->ino;

			mutex_unlock(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
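		/* Claim the inode for CRC checking before dropping
		   inocache_lock, so nobody else picks it up meanwhile. */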
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
			  __func__, ic->ino);

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
				ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		mutex_unlock(&c->alloc_sem);
		return ret;
	}

	/* If there are any blocks which need erasing, erase them now */
	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list)) {
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->alloc_sem);
		jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
		if (jffs2_erase_pending_blocks(c, 1))
			return 0;

		jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
		mutex_lock(&c->alloc_sem);
		spin_lock(&c->erase_completion_lock);
	}

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
		if (c->nr_erasing_blocks) {
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			return -EAGAIN;
		}
		jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->alloc_sem);
		return -EIO;
	}

	jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
		  jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	if (!jeb->used_size) {
		mutex_unlock(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;
	gcblock_dirty = jeb->dirty_size;

	while(ref_obsolete(raw)) {
		jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
			  ref_offset(raw));
		raw = ref_next(raw);
		if (unlikely(!raw)) {
			pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
			pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
				jeb->offset, jeb->free_size,
				jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			BUG();
		}
	}
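	/* Remember how far we got, so the next GC pass resumes here
	   rather than rescanning the block from the start. */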
	jeb->gc_node = raw;

	jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
		  ref_offset(raw));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		spin_unlock(&c->erase_completion_lock);
		if (ref_flags(raw) == REF_PRISTINE) {
			/* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
			jffs2_garbage_collect_pristine(c, NULL, raw);
		} else {
			/* Just mark it obsolete */
			jffs2_mark_node_obsolete(c, raw);
		}
		mutex_unlock(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

#ifdef CONFIG_JFFS2_FS_XATTR
	/* When 'ic' refers to an xattr_datum/xattr_ref, this node is GCed as an xattr.
	 * We can tell whether this node is an inode or an xattr from ic->class.    */
	if (ic->class == RAWNODE_CLASS_XATTR_DATUM
	    || ic->class == RAWNODE_CLASS_XATTR_REF) {
		spin_unlock(&c->erase_completion_lock);

		if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
		} else {
			ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
		}
		goto test_gcnode;
	}
#endif

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock is sufficient; we trade down since the inocache_lock
	   causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
		  __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
		  ic->ino);

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/

	switch(ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core.
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
				  ic->ino);
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since
		   we're holding the alloc_sem, no other garbage collection
		   can happen.
		*/
		pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
			ic->ino, ic->state);
		mutex_unlock(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem() (for marking nodes invalid) so we must
		   drop the alloc_sem before sleeping. */

		mutex_unlock(&c->alloc_sem);
		jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
			  __func__, ic->ino, ic->state);
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :)
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc.
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto test_gcnode;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */

	inum = ic->ino;
	nlink = ic->pino_nlink;
	spin_unlock(&c->inocache_lock);

	f = jffs2_gc_fetch_inode(c, inum, !nlink);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto release_sem;
	}
	if (!f) {
		ret = 0;
		goto release_sem;
	}

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 test_gcnode:
	if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
		/* Eep. This really should never happen. GC is broken */
		pr_err("Error garbage collecting node at %08x!\n",
		       ref_offset(jeb->gc_node));
		ret = -ENOSPC;
	}
 release_sem:
	mutex_unlock(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	if (c->gcblock && !c->gcblock->used_size) {
		jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
			  c->gcblock->offset);
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}

static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	mutex_lock(&f->sem);

	/* Now we have the lock for this inode. Check that it's still the one at the head
	   of the list. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
		jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
		/* They'll call again */
		goto upnout;
	}
	spin_unlock(&c->erase_completion_lock);

	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
	if (f->metadata && f->metadata->raw == raw) {
		fn = f->metadata;
		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
		goto upnout;
	}

	/* FIXME. Read node and do lookup? */
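	/* Find the byte range [start, end) that this raw node still
	   covers in the fragtree; one node may back several frags. */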
	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
		if (frag->node && frag->node->raw == raw) {
			fn = frag->node;
			end = frag->ofs + frag->size;
			if (!nrfrags++)
				start = frag->ofs;
			if (nrfrags == frag->node->frags)
				break; /* We've found them all */
		}
	}
	if (fn) {
		if (ref_flags(raw) == REF_PRISTINE) {
			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
			if (!ret) {
				/* Urgh. Return it sensibly. */
				frag->node->raw = f->inocache->nodes;
			}
			if (ret != -EBADFD)
				goto upnout;
		}
		/* We found a datanode. Do the GC */
		if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
			/* It crosses a page boundary. Therefore, it must be a hole:
			   ordinary data nodes carry at most one page of data. */
			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
		} else {
			/* It could still be a hole. But we GC the page this way anyway */
			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
		}
		goto upnout;
	}

	/* Wasn't a dnode. Try dirent */
	for (fd = f->dents; fd; fd=fd->next) {
		if (fd->raw == raw)
			break;
	}

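	/* A dirent with ino 0 is a deletion marker (the name was unlinked),
	   so it takes the deletion-dirent path below. */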
	if (fd && fd->ino) {
		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
	} else if (fd) {
		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
	} else {
		pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
			ref_offset(raw), f->inocache->ino);
		if (ref_obsolete(raw)) {
			pr_warn("But it's obsolete so we don't mind too much\n");
		} else {
			jffs2_dbg_dump_node(c, ref_offset(raw));
			BUG();
		}
	}
 upnout:
	mutex_unlock(&f->sem);

	return ret;
}

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw)
{
	union jffs2_node_union *node;
	size_t retlen;
	int ret;
	uint32_t phys_ofs, alloclen;
	uint32_t crc, rawlen;
	int retried = 0;

	jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
		  ref_offset(raw));

	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);

	/* Ask for a small amount of space (or the totlen if smaller) because we
	   don't want to force wastage of the end of a block if splitting would
	   work. */
	if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
		alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;

	ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
	/* 'rawlen' is not the exact summary size; it is only an upper estimation */

	if (ret)
		return ret;

	if (alloclen < rawlen) {
		/* Doesn't fit untouched. Return -EBADFD so the caller falls
		   back to the old route and splits the node. */
		return -EBADFD;
	}

	node = kmalloc(rawlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
	if (!ret && retlen != rawlen)
		ret = -EIO;
	if (ret)
		goto out_node;

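	/* The stored CRCs never cover themselves: hdr_crc skips its own
	   4 bytes, and the per-node CRCs below skip the trailing 8 bytes
	   (the two CRC fields) of the raw inode/dirent structures. */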
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	if (je32_to_cpu(node->u.hdr_crc) != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	switch(je16_to_cpu(node->u.nodetype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	case JFFS2_NODETYPE_INODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		crc = crc32(0, node, sizeof(node->i)-8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		if (je32_to_cpu(node->i.node_crc) != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 				ref_offset(raw), je32_to_cpu(node->i.node_crc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 				crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		if (je32_to_cpu(node->i.dsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			if (je32_to_cpu(node->i.data_crc) != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 				pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 					ref_offset(raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 					je32_to_cpu(node->i.data_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 				goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	case JFFS2_NODETYPE_DIRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		crc = crc32(0, node, sizeof(node->d)-8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		if (je32_to_cpu(node->d.node_crc) != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 				ref_offset(raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 				je32_to_cpu(node->d.node_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 			pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 				ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		if (node->d.nsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			crc = crc32(0, node->d.name, node->d.nsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			if (je32_to_cpu(node->d.name_crc) != crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 				pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 					ref_offset(raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 					je32_to_cpu(node->d.name_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 				goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		if (ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 				ref_offset(raw), je16_to_cpu(node->u.nodetype));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	/* OK, all the CRCs are good; this node can just be copied as-is. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	phys_ofs = write_ofs(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (ret || (retlen != rawlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			  rawlen, phys_ofs, ret, retlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		if (retlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 				  phys_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		if (!retried) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			/* Try to reallocate space and retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			uint32_t dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			retried = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			jffs2_dbg_acct_sanity_check(c, jeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			jffs2_dbg_acct_paranoia_check(c, jeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			/* rawlen is not the exact summary size here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			   it is only an upper bound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 					  phys_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 				jffs2_dbg_acct_sanity_check(c, jeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 				jffs2_dbg_acct_paranoia_check(c, jeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 				goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 				  ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		goto out_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
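	/*
	 * The write succeeded: account the copy at its new location, still
	 * marked REF_PRISTINE, and only then obsolete the original node.
	 */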
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	jffs2_mark_node_obsolete(c, raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		  ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  out_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	kfree(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	ret = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	goto out_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 					struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct jffs2_full_dnode *new_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct jffs2_raw_inode ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct jffs2_node_frag *last_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	union jffs2_device_node dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	char *mdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	int mdatalen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	uint32_t alloclen, ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	    S_ISCHR(JFFS2_F_I_MODE(f)) ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		/* For these, we don't actually need to read the old node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		mdata = (char *)&dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			  __func__, mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		mdatalen = fn->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		mdata = kmalloc(fn->size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		if (!mdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			kfree(mdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			  __func__, mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	}
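	/*
	 * At this point mdata/mdatalen describe the metadata payload: the
	 * encoded device number for block/char devices, the symlink target
	 * just read back for symlinks, and an empty payload otherwise.
	 */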
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				JFFS2_SUMMARY_INODE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			sizeof(ri) + mdatalen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	last_frag = frag_last(&f->fragtree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (last_frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		/* Fetch the inode length from the fragtree rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		 * from i_size, since i_size may not have been updated yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		ilen = last_frag->ofs + last_frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		ilen = JFFS2_F_I_SIZE(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	memset(&ri, 0, sizeof(ri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	ri.ino = cpu_to_je32(f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	ri.version = cpu_to_je32(++f->highest_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	ri.isize = cpu_to_je32(ilen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	ri.offset = cpu_to_je32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	ri.csize = cpu_to_je32(mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	ri.dsize = cpu_to_je32(mdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	ri.compr = JFFS2_COMPR_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
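	/*
	 * Both CRCs follow the usual JFFS2 convention: node_crc covers the
	 * raw inode minus its final 8 bytes (the data_crc and node_crc
	 * fields themselves), and data_crc covers only the payload.
	 */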
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (IS_ERR(new_fn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		ret = PTR_ERR(new_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	jffs2_mark_node_obsolete(c, fn->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	jffs2_free_full_dnode(fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	f->metadata = new_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (S_ISLNK(JFFS2_F_I_MODE(f)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		kfree(mdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	struct jffs2_full_dirent *new_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	struct jffs2_raw_dirent rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	uint32_t alloclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	rd.nsize = strlen(fd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	rd.pino = cpu_to_je32(f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	rd.version = cpu_to_je32(++f->highest_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	rd.ino = cpu_to_je32(fd->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	/* If the times on this inode were set by explicit utime() they can be different,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	   so refrain from splatting them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		rd.mctime = cpu_to_je32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	rd.type = fd->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
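	/*
	 * The replacement dirent carries the next version number for this
	 * inode, so once written it supersedes the copy being collected.
	 */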
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 				JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			sizeof(rd)+rd.nsize, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (IS_ERR(new_fd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			PTR_ERR(new_fd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return PTR_ERR(new_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	jffs2_add_fd_to_list(c, new_fd, &f->dents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct jffs2_full_dirent **fdp = &f->dents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/* On a medium where we can't actually mark nodes obsolete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	   permanently, such as NAND flash, we need to work out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	   whether this deletion dirent is still needed to actively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	   delete a 'real' dirent with the same name that's still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	   somewhere else on the flash. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (!jffs2_can_mark_obsolete(c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		struct jffs2_raw_dirent *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		struct jffs2_raw_node_ref *raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		size_t retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		int name_len = strlen(fd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		uint32_t name_crc = crc32(0, fd->name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		rd = kmalloc(rawlen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (!rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		/* Prevent the erase code from nicking the obsolete node refs while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		   we're looking at them. I really don't like this extra lock but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		   can't see any alternative. Suggestions on a postcard to... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		mutex_lock(&c->erase_free_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			/* We only care about obsolete ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			if (!(ref_obsolete(raw)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			/* Any dirent with the same name is going to have the same length... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			if (ref_totlen(c, NULL, raw) != rawlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			/* Doesn't matter if there's one in the same erase block. We're going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			   delete it too at the same time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				  ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			/* This is an obsolete node belonging to the same directory, and it's of the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			   length. We need to take a closer look...*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 					__func__, ret, ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				/* If we can't read it, we needn't worry about obsoleting it. Move on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			if (retlen != rawlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 					__func__, retlen, rawlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 					ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			/* If the name CRC doesn't match, skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			if (je32_to_cpu(rd->name_crc) != name_crc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			/* If the name length doesn't match, or it's another deletion dirent, skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			/* OK, check the actual name now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			if (memcmp(rd->name, fd->name, name_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			/* OK. The name really does match. There really is still an older node on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			   the flash which our deletion dirent obsoletes. So we have to write out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			   a new deletion dirent to replace it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			mutex_unlock(&c->erase_free_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 				  ref_offset(fd->raw), fd->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				  ref_offset(raw), je32_to_cpu(rd->ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			kfree(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		mutex_unlock(&c->erase_free_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		kfree(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	   we should update the metadata node with those times accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	/* No need for it any more. Just mark it obsolete and remove it from the list */
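	/*
	 * Classic pointer-to-pointer unlink: fdp walks the ->next links
	 * themselves, so removing fd needs no special case for the list
	 * head. The same pattern in miniature:
	 *
	 *	for (pp = &head; *pp; pp = &(*pp)->next)
	 *		if (*pp == victim) { *pp = victim->next; break; }
	 */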
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	while (*fdp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if ((*fdp) == fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			*fdp = fd->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		fdp = &(*fdp)->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			fd->name, f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	jffs2_mark_node_obsolete(c, fd->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	jffs2_free_full_dirent(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 				      uint32_t start, uint32_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct jffs2_raw_inode ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct jffs2_node_frag *frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	struct jffs2_full_dnode *new_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	uint32_t alloclen, ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		  f->inocache->ino, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	memset(&ri, 0, sizeof(ri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (fn->frags > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		size_t readlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		uint32_t crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		/* It's partially obsoleted by a later write. So we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		   write it out again with the _same_ version as before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		if (readlen != sizeof(ri) || ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 				ret, readlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			goto fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				__func__, ref_offset(fn->raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				__func__, ref_offset(fn->raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				je32_to_cpu(ri.totlen), sizeof(ri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		crc = crc32(0, &ri, sizeof(ri)-8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		if (crc != je32_to_cpu(ri.node_crc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				__func__, ref_offset(fn->raw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				je32_to_cpu(ri.node_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			/* FIXME: We could possibly deal with this by writing new holes for each frag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				start, end, f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			goto fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		if (ri.compr != JFFS2_COMPR_ZERO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 				__func__, ref_offset(fn->raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				start, end, f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			goto fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	fill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		ri.totlen = cpu_to_je32(sizeof(ri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		ri.ino = cpu_to_je32(f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		ri.version = cpu_to_je32(++f->highest_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		ri.offset = cpu_to_je32(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		ri.dsize = cpu_to_je32(end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		ri.csize = cpu_to_je32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		ri.compr = JFFS2_COMPR_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	}
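	/*
	 * Common tail for both paths: fill in the generic inode fields and
	 * recompute node_crc over the finished node. The fields set above
	 * are the ones that differ between the reread and rebuilt cases.
	 */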
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	frag = frag_last(&f->fragtree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		/* Fetch the inode length from the fragtree rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		 * from i_size, since i_size may not have been updated yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		ilen = frag->ofs + frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		ilen = JFFS2_F_I_SIZE(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	ri.isize = cpu_to_je32(ilen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	ri.data_crc = cpu_to_je32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				     JFFS2_SUMMARY_INODE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			sizeof(ri), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (IS_ERR(new_fn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		return PTR_ERR(new_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (je32_to_cpu(ri.version) == f->highest_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		jffs2_add_full_dnode_to_inode(c, f, new_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		if (f->metadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			jffs2_mark_node_obsolete(c, f->metadata->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			jffs2_free_full_dnode(f->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			f->metadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 * We should only get here in the case where the node we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 * replacing had more than one frag, so we kept the same version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	 * number as before. (Except in case of error -- see 'goto fill;'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * above.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	D1(if (unlikely(fn->frags <= 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				__func__, fn->frags, je32_to_cpu(ri.version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				f->highest_version, je32_to_cpu(ri.ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	});
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	mark_ref_normal(new_fn->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
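	/*
	 * Hand the frags over from the old node to the new one: every frag
	 * in the old node's range that still points at fn is redirected to
	 * new_fn, and the per-node frag counts move with it.
	 */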
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	     frag; frag = frag_next(frag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (frag->ofs > fn->size + fn->ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		if (frag->node == fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			frag->node = new_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			new_fn->frags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			fn->frags--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (fn->frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		pr_warn("%s(): Old node still has frags!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (!new_fn->frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		pr_warn("%s(): New node has no frags!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	jffs2_mark_node_obsolete(c, fn->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	jffs2_free_full_dnode(fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				       uint32_t start, uint32_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	struct jffs2_full_dnode *new_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct jffs2_raw_inode ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	uint32_t alloclen, offset, orig_end, orig_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	unsigned char *comprbuf = NULL, *writebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	unsigned char *pg_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	memset(&ri, 0, sizeof(ri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		  f->inocache->ino, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	orig_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	orig_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		/* Attempt to do some merging. But only expand to cover logically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		   adjacent frags if the block containing them is already considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		   to be dirty. Otherwise we end up with GC just going round in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		   circles dirtying the nodes it already wrote out, especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		   on NAND where we have small eraseblocks and hence a much higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		   chance of nodes having to be split to cross boundaries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		struct jffs2_node_frag *frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		uint32_t min, max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		min = start & ~(PAGE_SIZE-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		max = min + PAGE_SIZE;
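		/*
		 * min/max bound the page-aligned window containing 'start';
		 * with 4KiB pages, start 0x1234 gives min 0x1000 and max
		 * 0x2000. The merge below never grows past this window.
		 */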
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		frag = jffs2_lookup_node_frag(&f->fragtree, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		/* BUG_ON(!frag) but that'll happen anyway... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		BUG_ON(frag->ofs != start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		/* First grow down... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		while ((frag = frag_prev(frag)) && frag->ofs >= min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			/* If the previous frag doesn't even reach the beginning, there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			   excessive fragmentation. Just merge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			if (frag->ofs > min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 				jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 					  frag->ofs, frag->ofs+frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 				start = frag->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			/* OK. This frag holds the first byte of the page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			if (!frag->node || !frag->node->raw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 					  frag->ofs, frag->ofs+frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 				/* OK, it's a frag which extends to the beginning of the page. Does it live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				   in a block which is still considered clean? If so, don't obsolete it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 				   If not, cover it anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 				struct jffs2_raw_node_ref *raw = frag->node->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 				struct jffs2_eraseblock *jeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				jeb = &c->blocks[raw->flash_offset / c->sector_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 				if (jeb == c->gcblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 					jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 						  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 						  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 						  ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 					start = frag->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 					jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 						  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 						  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 						  jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 					  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 					  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					  jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 				start = frag->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		/* ... then up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		/* Find last frag which is actually part of the node we're to GC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		while ((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			/* If the next frag doesn't even reach the end of the page, there's lots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			   of fragmentation. Just merge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			if (frag->ofs+frag->size < max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 					  frag->ofs, frag->ofs+frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				end = frag->ofs + frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			if (!frag->node || !frag->node->raw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 				jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 					  frag->ofs, frag->ofs+frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				/* OK, it's a frag which extends to the end of the page. Does it live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				   in a block which is still considered clean? If so, don't obsolete it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 				   If not, cover it anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 				struct jffs2_raw_node_ref *raw = frag->node->raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 				struct jffs2_eraseblock *jeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 				jeb = &c->blocks[raw->flash_offset / c->sector_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 				if (jeb == c->gcblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 					jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 						  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 						  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 						  ref_offset(raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 					end = frag->ofs + frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 					jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 						  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 						  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 						  jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 				jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 					  frag->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 					  frag->ofs + frag->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 					  jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				end = frag->ofs + frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			  orig_start, orig_end, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		BUG_ON(end < orig_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		BUG_ON(start > orig_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	/* The rules state that we must obtain the page lock *before* f->sem, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	 * actually going to *change* so we're safe; we only allow reading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	 * It is important to note that jffs2_write_begin() will ensure that its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	 * page is marked Uptodate before allocating space. That means that if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	 * end up here trying to GC the *same* page that jffs2_write_begin() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	 * trying to write out, read_cache_page() will not deadlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	mutex_unlock(&f->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			       jffs2_do_readpage_unlock, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		pr_warn("read_cache_page() returned error: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			PTR_ERR(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		mutex_lock(&f->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	pg_ptr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	mutex_lock(&f->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	offset = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	while (offset < orig_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		uint32_t datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		uint32_t cdatalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		uint16_t comprtype = JFFS2_COMPR_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 					&alloclen, JFFS2_SUMMARY_INODE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 				sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		datalen = end - offset;
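		/*
		 * cdatalen caps the compressed output at what fits in the
		 * reservation; datalen starts as the whole remaining span and
		 * jffs2_compress() trims both to what it actually consumed
		 * and produced.
		 */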
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		writebuf = pg_ptr + (offset & (PAGE_SIZE-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		ri.ino = cpu_to_je32(f->inocache->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		ri.version = cpu_to_je32(++f->highest_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		ri.offset = cpu_to_je32(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		ri.csize = cpu_to_je32(cdatalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		ri.dsize = cpu_to_je32(datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		ri.compr = comprtype & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		ri.usercompr = (comprtype >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		jffs2_free_comprbuf(comprbuf, writebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		if (IS_ERR(new_fn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			pr_warn("Error writing new dnode: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				PTR_ERR(new_fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			ret = PTR_ERR(new_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
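		/*
		 * Advance by the uncompressed bytes actually written; if the
		 * reservation couldn't cover the rest of the range we just go
		 * round the loop again for the remainder.
		 */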
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		offset += datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		if (f->metadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			jffs2_mark_node_obsolete(c, f->metadata->raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			jffs2_free_full_dnode(f->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			f->metadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }