Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/* This file implements TNC functions for committing */

#include <linux/random.h>
#include "ubifs.h"

/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

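	/*
	 * Record the znode's old on-flash location in @c->old_idx (the
	 * parent's zbranch still holds the pre-commit position at this
	 * point), so that space still belonging to the old index is not
	 * handed out again by the in-the-gaps method (see find_old_idx())
	 * before the commit completes.
	 */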
	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}

/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
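	/*
	 * Pack index nodes for the znodes queued for commit (@c->enext) into
	 * this gap, stopping as soon as the next index node no longer fits.
	 */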
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}

/**
 * find_old_idx - find an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %1 if found and %0 otherwise.
 */
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *o;
	struct rb_node *p;

	p = c->old_idx.rb_node;
	while (p) {
		o = rb_entry(p, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = p->rb_left;
		else if (lnum > o->lnum)
			p = p->rb_right;
		else if (offs < o->offs)
			p = p->rb_left;
		else if (offs > o->offs)
			p = p->rb_right;
		else
			return 1;
	}
	return 0;
}

/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete).  Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 * is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int ret;

	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
	if (ret < 0)
		return ret; /* Error code */
	if (ret == 0)
		if (find_old_idx(c, lnum, offs))
			return 1;
	return ret;
}

/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number in @c->gap_lebs[p]
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 * This function merely puts the next znode into the next gap, making no
 * attempt to maximise the number of znodes that fit.
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	c->gap_lebs[p] = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB.  We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use; /* Error code */
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten.  This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written; /* Error code */
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}

/**
 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns the number of empty LEBs needed to commit @cnt znodes
 * to the current index head.  The number is not exact and may be more than
 * needed.
 */
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
	int d;

	/* Assume maximum index node size (i.e. overestimate space needed) */
	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
	if (cnt < 0)
		cnt = 0;
	d = c->leb_size / c->max_idx_node_sz;
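	/*
	 * Illustration (hypothetical geometry, not specific to any board):
	 * with leb_size = 126976, max_idx_node_sz = 512 and ihead_offs =
	 * 124928, the current index head still fits
	 * (126976 - 124928) / 512 = 4 worst-case index nodes and each empty
	 * LEB fits 126976 / 512 = 248, so committing cnt = 1000 znodes needs
	 * DIV_ROUND_UP(1000 - 4, 248) = 5 empty LEBs.
	 */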
	return DIV_ROUND_UP(cnt, d);
}

/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit.
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;

	dbg_gc("%d znodes to write", cnt);

	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	old_idx_lebs = c->lst.idx_lebs;
	do {
		ubifs_assert(c, p < c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
		/*
		 * Dynamically change the size of @c->gap_lebs to prevent an
		 * out-of-bounds write, because @c->lst.idx_lebs could be
		 * increased by @get_idx_gc_leb (called by
		 * layout_leb_in_gaps -> ubifs_find_dirty_idx_leb) during the
		 * loop. Only enlarge @c->gap_lebs when needed.
		 */
		if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
		    old_idx_lebs < c->lst.idx_lebs) {
			old_idx_lebs = c->lst.idx_lebs;
			gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
					       (old_idx_lebs + 1), GFP_NOFS);
			if (!gap_lebs) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return -ENOMEM;
			}
			c->gap_lebs = gap_lebs;
		}
	} while (leb_needed_cnt > c->ileb_cnt);

	c->gap_lebs[p] = -1;
	return 0;
}

/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;
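	/*
	 * @buf_len mirrors the size of the commit buffer that write_index()
	 * will flush (one maximum-size index node rounded up to the minimal
	 * I/O unit). @buf_offs is the LEB offset of the current buffer-sized
	 * chunk, while @used and @avail track how much of that chunk has
	 * been laid out so far.
	 */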
	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}

/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit.  If there were
 * an insufficient number of empty LEBs allocated, then index nodes are placed
 * into the gaps created by obsolete index nodes in non-empty index LEBs.  For
 * this purpose, an obsolete index node is one that was not in the index as at
 * the end of the last commit.  To write "in-the-gaps" requires that those index
 * LEBs are updated atomically in-place.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	int err;

	if (no_space) {
		err = layout_in_gaps(c, cnt);
		if (err)
			return err;
	}
	err = layout_in_empty_space(c);
	return err;
}

/**
 * find_first_dirty - find first dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
	int i, cont;

	if (!znode)
		return NULL;

	while (1) {
		if (znode->level == 0) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
		cont = 0;
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
				znode = zbr->znode;
				cont = 1;
				break;
			}
		}
		if (!cont) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
	}
}

/**
 * find_next_dirty - find next dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
	int n = znode->iip + 1;

	znode = znode->parent;
	if (!znode)
		return NULL;
	for (; n < znode->child_cnt; n++) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
			return find_first_dirty(zbr->znode);
	}
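	/*
	 * No dirty sibling subtree remains, so the parent is committed next:
	 * dirty znodes are visited in post-order, children before parents.
	 */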
	return znode;
}

/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
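		/*
		 * From this point the znode is copy-on-write: if it is
		 * changed again before it has been written out, the TNC
		 * copies it instead of modifying it in place (see
		 * dirty_cow_znode() in tnc.c).
		 */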
		cnext = find_next_dirty(znode);
		if (!cnext) {
			znode->cnext = c->cnext;
			break;
		}
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}

/**
 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 * empty LEBs.  %0 is returned on success, otherwise a negative error code
 * is returned.
 */
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
	int i, leb_cnt, lnum;

	c->ileb_cnt = 0;
	c->ileb_nxt = 0;
	leb_cnt = get_leb_cnt(c, cnt);
	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
	if (!leb_cnt)
		return 0;
	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
	if (!c->ilebs)
		return -ENOMEM;
	for (i = 0; i < leb_cnt; i++) {
		lnum = ubifs_find_free_leb_for_idx(c);
		if (lnum < 0)
			return lnum;
		c->ilebs[c->ileb_cnt++] = lnum;
		dbg_cmt("LEB %d", lnum);
	}
	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
		return -ENOSPC;
	return 0;
}

/**
 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 * @c: UBIFS file-system description object
 *
 * It is possible that we allocate more empty LEBs for the commit than we need.
 * This function frees the surplus.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_unused_idx_lebs(struct ubifs_info *c)
{
	int i, err = 0, lnum, er;

	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
		lnum = c->ilebs[i];
		dbg_cmt("LEB %d", lnum);
		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
					 LPROPS_INDEX | LPROPS_TAKEN, 0);
		if (!err)
			err = er;
	}
	return err;
}

/**
 * free_idx_lebs - free unused LEBs after commit end.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_idx_lebs(struct ubifs_info *c)
{
	int err;

	err = free_unused_idx_lebs(c);
	kfree(c->ilebs);
	c->ilebs = NULL;
	return err;
}

/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash. If there is not enough free space it uses the
 * in-gap commit method. Returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * Although we have not finished committing yet, update the size of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 * budget. It is OK to do this now, because we've reserved all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 * space which is needed to commit the index, and it is safe for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	 * budgeting subsystem to assume the index is already committed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	 * even though it is not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	c->bi.old_idx_sz = c->calc_idx_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	c->bi.uncommitted_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	spin_unlock(&c->space_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	mutex_unlock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	dbg_cmt("size of index %llu", c->calc_idx_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	free_idx_lebs(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	mutex_unlock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * write_index - write index nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * This function writes the index nodes whose positions were laid out in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * layout_in_empty_space function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) static int write_index(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	struct ubifs_idx_node *idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	struct ubifs_znode *znode, *cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	cnext = c->enext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (!cnext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * Always write index nodes to the index head so that index nodes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * other types of nodes are never mixed in the same erase block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	lnum = c->ihead_lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	buf_offs = c->ihead_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	/* Set up accounting for the commit buffer (@c->cbuf) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	avail = buf_len;
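	/*
	 * From here on, @buf_offs is the LEB offset at which the buffered
	 * data will be written, @used is how many bytes of @c->cbuf are in
	 * use, and @avail is how many bytes are still free in the buffer.
	 */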
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* Ensure there is enough room for first write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (buf_offs + next_len > c->leb_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 					  LPROPS_TAKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			return err;
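		/*
		 * The index head LEB does not have room for even the first
		 * index node; setting @lnum to -1 makes the loop below switch
		 * to the first of the pre-allocated LEBs in @c->ilebs.
		 */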
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		lnum = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		u8 hash[UBIFS_HASH_ARR_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		znode = cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		idx = c->cbuf + used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		/* Make index node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		idx->ch.node_type = UBIFS_IDX_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		idx->child_cnt = cpu_to_le16(znode->child_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		idx->level = cpu_to_le16(znode->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		for (i = 0; i < znode->child_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			struct ubifs_zbranch *zbr = &znode->zbranch[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			key_write_idx(c, &zbr->key, &br->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			br->lnum = cpu_to_le32(zbr->lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			br->offs = cpu_to_le32(zbr->offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			br->len = cpu_to_le32(zbr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			if (!zbr->lnum || !zbr->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				ubifs_err(c, "bad ref in znode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				ubifs_dump_znode(c, znode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				if (zbr->znode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 					ubifs_dump_znode(c, zbr->znode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		len = ubifs_idx_node_sz(c, znode->child_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		ubifs_prepare_node(c, idx, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		ubifs_node_calc_hash(c, idx, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
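		/*
		 * Record the hash of the node just prepared in the parent
		 * znode's zbranch (and in @c->zroot for the root znode), so
		 * that parent index nodes written later carry the right hash.
		 */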
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		mutex_lock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		if (znode->cparent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			ubifs_copy_hash(c, hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 					znode->cparent->zbranch[znode->ciip].hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		if (znode->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			if (!ubifs_zn_obsolete(znode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				ubifs_copy_hash(c, hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 					znode->parent->zbranch[znode->iip].hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			ubifs_copy_hash(c, hash, c->zroot.hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		mutex_unlock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		/* Determine the index node position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		if (lnum == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			lnum = c->ilebs[lnum_pos++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			buf_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			avail = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		offs = buf_offs + used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
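		/*
		 * The position computed here must match the position that the
		 * layout pass recorded in the znode, otherwise the on-flash
		 * index would not be what the TNC expects.
		 */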
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (lnum != znode->lnum || offs != znode->offs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		    len != znode->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			ubifs_err(c, "inconsistent znode posn");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		/* Grab some stuff from znode while we still can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		cnext = znode->cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		ubifs_assert(c, ubifs_zn_dirty(znode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		ubifs_assert(c, ubifs_zn_cow(znode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 * It is important that other threads see the %DIRTY_ZNODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		 * flag cleared before %COW_ZNODE. Specifically, it matters in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		 * the 'dirty_cow_znode()' function. This is the reason for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		 * first barrier. Also, we want the bit changes to be seen by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		 * other threads ASAP, to avoid unnecessary copying, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		 * the reason for the second barrier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		clear_bit(DIRTY_ZNODE, &znode->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		clear_bit(COW_ZNODE, &znode->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * We have marked the znode as clean but have not updated the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 * before 'free_obsolete_znodes()' is called, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		 * @c->clean_zn_cnt will be decremented before it gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		 * incremented (resulting in 2 decrements for the same znode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		 * This means that @c->clean_zn_cnt may become negative for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		 * while.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		 * Q: why can we not increment @c->clean_zn_cnt here?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		 * A: because we do not have the @c->tnc_mutex locked, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		 *    following code would be racy and buggy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		 *    if (!ubifs_zn_obsolete(znode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		 *            atomic_long_inc(&c->clean_zn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		 *    }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		 *    Thus, we just delay the @c->clean_zn_cnt update until we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		 *    have the mutex locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		/* Do not access znode from this point on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		/* Update buffer positions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		wlen = used + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		used += ALIGN(len, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		avail -= ALIGN(len, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		 * Calculate the next index node length to see if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		 * enough room for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		if (cnext == c->cnext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			next_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
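		/*
		 * If the next node still fits in this LEB, keep batching it
		 * into @c->cbuf and flush only when the buffer is full
		 * (@avail <= 0). Otherwise pad the tail up to the minimal
		 * I/O unit size and flush what has been accumulated.
		 */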
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		nxt_offs = buf_offs + used + next_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		if (next_len && nxt_offs <= c->leb_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			if (avail > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				blen = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			wlen = ALIGN(wlen, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			blen = ALIGN(wlen, c->min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		/* The buffer is full or there are no more znodes to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		buf_offs += blen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		if (next_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			if (nxt_offs > c->leb_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 							  0, LPROPS_TAKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				lnum = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			}
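			/*
			 * Bytes prepared beyond the @blen bytes just written
			 * belong to index nodes which are not on the flash
			 * yet, so slide them to the start of @c->cbuf.
			 */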
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			used -= blen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			if (used < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			avail = buf_len - used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			memmove(c->cbuf, c->cbuf + blen, used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	if (lnum != c->dbg->new_ihead_lnum ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	    buf_offs != c->dbg->new_ihead_offs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		ubifs_err(c, "inconsistent ihead");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	c->ihead_lnum = lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	c->ihead_offs = buf_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * free_obsolete_znodes - free obsolete znodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * At the end of the commit-end phase, obsolete znodes are freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static void free_obsolete_znodes(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	struct ubifs_znode *znode, *cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	cnext = c->cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		znode = cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		cnext = znode->cnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		if (ubifs_zn_obsolete(znode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			kfree(znode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		else {
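			/*
			 * The znode stays in the TNC: detach it from the
			 * commit list and account it as clean in the counters
			 * used by the shrinker.
			 */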
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			znode->cnext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			atomic_long_inc(&c->clean_zn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			atomic_long_inc(&ubifs_clean_zn_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	} while (cnext != c->cnext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * return_gap_lebs - return LEBs used by the in-gap commit method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * This function clears the "taken" flag for the LEBs which were used by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * "commit in-the-gaps" method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static int return_gap_lebs(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	int *p, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (!c->gap_lebs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	dbg_cmt("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	for (p = c->gap_lebs; *p != -1; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 					  LPROPS_TAKEN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	kfree(c->gap_lebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	c->gap_lebs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * ubifs_tnc_end_commit - update the TNC for commit end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  * Write the dirty znodes. Returns %0 on success or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int ubifs_tnc_end_commit(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (!c->cnext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	err = return_gap_lebs(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	err = write_index(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	mutex_lock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	free_obsolete_znodes(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	c->cnext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	kfree(c->ilebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	c->ilebs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	mutex_unlock(&c->tnc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }