// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions needed to recover from unclean un-mounts.
 * When UBIFS is mounted, it checks a flag in the master node to determine if
 * the previous un-mount completed successfully. If not, the process of
 * mounting incorporates additional checking and fixing of on-flash data
 * structures. UBIFS always cleans away all remnants of an unclean un-mount,
 * so that errors do not accumulate. However, UBIFS defers recovery if it is
 * mounted read-only, and the flash is not modified in that case.
 *
 * The general UBIFS approach to recovery is to recover from corruption which
 * could have been caused by a power cut, but to refuse to recover from
 * corruption caused by anything else. UBIFS tries to distinguish between
 * these two causes of corruption, silently recovering in the former case and
 * loudly complaining in the latter case.
 *
 * UBIFS writes only to erased LEBs, so it writes only to flash space that
 * contains nothing but 0xFFs. UBIFS also always writes strictly from the
 * beginning of the LEB to the end. And UBIFS assumes that the underlying
 * flash media writes in @c->max_write_size bytes at a time.
 *
 * Hence, if UBIFS finds a corrupted node at offset X, it expects only the
 * min. I/O unit corresponding to offset X to contain corrupted data; all the
 * following min. I/O units have to contain empty space (all 0xFFs). If this
 * is not true, the corruption cannot be the result of a power cut, and UBIFS
 * refuses to mount.
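 *
 * As an illustration (the numbers below are only an example, not values
 * mandated by UBIFS): assume @c->min_io_size is 2048 bytes. If scanning finds
 * a corrupted node at offset 6144, then only the 2048-byte min. I/O unit
 * starting at offset 6144 may contain non-0xFF garbage; everything from
 * offset 8192 up to the end of the LEB must still be erased (all 0xFF). Any
 * non-0xFF byte beyond that boundary means the damage cannot be explained by
 * an interrupted write, and recovery is refused.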
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * is_empty - determine whether a buffer is empty (contains all 0xff).
 * @buf: buffer to check
 * @len: length of buffer
 *
 * This function returns %1 if the buffer is empty (contains all 0xff),
 * otherwise %0 is returned.
 */
static int is_empty(void *buf, int len)
{
        uint8_t *p = buf;
        int i;

        for (i = 0; i < len; i++)
                if (*p++ != 0xff)
                        return 0;
        return 1;
}

/**
 * first_non_ff - find offset of the first non-0xff byte.
 * @buf: buffer to search in
 * @len: length of buffer
 *
 * This function returns the offset of the first non-0xff byte in @buf or %-1
 * if the buffer contains only 0xff bytes.
 */
static int first_non_ff(void *buf, int len)
{
        uint8_t *p = buf;
        int i;

        for (i = 0; i < len; i++)
                if (*p++ != 0xff)
                        return i;
        return -1;
}

/**
 * get_master_node - get the last valid master node allowing for corruption.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @pbuf: buffer containing the LEB read is returned here
 * @mst: master node, if found, is returned here
 * @cor: corruption, if found, is returned here
 *
 * This function allocates a buffer, reads the LEB into it, and finds and
 * returns the last valid master node, allowing for one area of corruption.
 * The corrupt area, if there is one, must be consistent with the assumption
 * that it is the result of an unclean unmount while the master node was being
 * written. Under those circumstances, it is valid to use the previously
 * written master node.
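 *
 * Master nodes are written sequentially, one per @c->mst_node_alsz bytes,
 * from the start of the LEB. So the search below walks the LEB in
 * @c->mst_node_alsz steps until it reaches a position that does not start
 * with the UBIFS node magic, and then looks one step (or, if that position
 * is corrupted, two steps) back for the last completely written node.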
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
                           struct ubifs_mst_node **mst, void **cor)
{
        const int sz = c->mst_node_alsz;
        int err, offs, len;
        void *sbuf, *buf;

        sbuf = vmalloc(c->leb_size);
        if (!sbuf)
                return -ENOMEM;

        err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0);
        if (err && err != -EBADMSG)
                goto out_free;

        /* Find the first position that is definitely not a node */
        offs = 0;
        buf = sbuf;
        len = c->leb_size;
        while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
                struct ubifs_ch *ch = buf;

                if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
                        break;
                offs += sz;
                buf += sz;
                len -= sz;
        }
        /* See if there was a valid master node before that */
        if (offs) {
                int ret;

                offs -= sz;
                buf -= sz;
                len += sz;
                ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
                if (ret != SCANNED_A_NODE && offs) {
                        /* Could have been corruption, so check one place back */
                        offs -= sz;
                        buf -= sz;
                        len += sz;
                        ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
                        if (ret != SCANNED_A_NODE)
                                /*
                                 * We accept only one area of corruption because
                                 * we are assuming that it was caused while
                                 * trying to write a master node.
                                 */
                                goto out_err;
                }
                if (ret == SCANNED_A_NODE) {
                        struct ubifs_ch *ch = buf;

                        if (ch->node_type != UBIFS_MST_NODE)
                                goto out_err;
                        dbg_rcvry("found a master node at %d:%d", lnum, offs);
                        *mst = buf;
                        offs += sz;
                        buf += sz;
                        len -= sz;
                }
        }
        /* Check for corruption */
        if (offs < c->leb_size) {
                if (!is_empty(buf, min_t(int, len, sz))) {
                        *cor = buf;
                        dbg_rcvry("found corruption at %d:%d", lnum, offs);
                }
                offs += sz;
                buf += sz;
                len -= sz;
        }
        /* Check remaining empty space */
        if (offs < c->leb_size)
                if (!is_empty(buf, len))
                        goto out_err;
        *pbuf = sbuf;
        return 0;

out_err:
        err = -EINVAL;
out_free:
        vfree(sbuf);
        *mst = NULL;
        *cor = NULL;
        return err;
}

/**
 * write_rcvrd_mst_node - write recovered master node.
 * @c: UBIFS file-system description object
 * @mst: master node
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_rcvrd_mst_node(struct ubifs_info *c,
                                struct ubifs_mst_node *mst)
{
        int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
        __le32 save_flags;

        dbg_rcvry("recovery");

        save_flags = mst->flags;
        mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

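        /*
         * Prepare the node (CRC and, where applicable, HMAC) and atomically
         * replace both LEBs of the master area (%UBIFS_MST_LNUM and
         * %UBIFS_MST_LNUM + 1) with the recovered master node. The
         * %UBIFS_MST_RCVRY flag records that this node was written by
         * recovery; the caller's flags are restored before returning.
         */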
        err = ubifs_prepare_node_hmac(c, mst, UBIFS_MST_NODE_SZ,
                                      offsetof(struct ubifs_mst_node, hmac), 1);
        if (err)
                goto out;
        err = ubifs_leb_change(c, lnum, mst, sz);
        if (err)
                goto out;
        err = ubifs_leb_change(c, lnum + 1, mst, sz);
        if (err)
                goto out;
out:
        mst->flags = save_flags;
        return err;
}

/**
 * ubifs_recover_master_node - recover the master node.
 * @c: UBIFS file-system description object
 *
 * This function recovers the master node from corruption that may occur due to
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_master_node(struct ubifs_info *c)
{
        void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
        struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
        const int sz = c->mst_node_alsz;
        int err, offs1, offs2;

        dbg_rcvry("recovery");

        err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
        if (err)
                goto out_free;

        err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
        if (err)
                goto out_free;

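        /*
         * @offs1 and @offs2 are the offsets of the last valid master nodes
         * found in the first and second master LEBs. Master nodes are written
         * sequentially in steps of @sz, and each update goes to the first LEB
         * before the second, so comparing the two offsets (together with the
         * corruption information) shows at which point a power cut
         * interrupted the last update.
         */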
        if (mst1) {
                offs1 = (void *)mst1 - buf1;
                if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
                    (offs1 == 0 && !cor1)) {
                        /*
                         * mst1 was written by recovery at offset 0 with no
                         * corruption.
                         */
                        dbg_rcvry("recovery recovery");
                        mst = mst1;
                } else if (mst2) {
                        offs2 = (void *)mst2 - buf2;
                        if (offs1 == offs2) {
                                /* Same offset, so must be the same */
                                if (ubifs_compare_master_node(c, mst1, mst2))
                                        goto out_err;
                                mst = mst1;
                        } else if (offs2 + sz == offs1) {
                                /* 1st LEB was written, 2nd was not */
                                if (cor1)
                                        goto out_err;
                                mst = mst1;
                        } else if (offs1 == 0 &&
                                   c->leb_size - offs2 - sz < sz) {
                                /* 1st LEB was unmapped and written, 2nd not */
                                if (cor1)
                                        goto out_err;
                                mst = mst1;
                        } else
                                goto out_err;
                } else {
                        /*
                         * 2nd LEB was unmapped and about to be written, so
                         * there must be only one master node in the first LEB
                         * and no corruption.
                         */
                        if (offs1 != 0 || cor1)
                                goto out_err;
                        mst = mst1;
                }
        } else {
                if (!mst2)
                        goto out_err;
                /*
                 * 1st LEB was unmapped and about to be written, so there must
                 * be no room left in the 2nd LEB, i.e. another master node of
                 * size @sz would no longer fit in it.
                 */
                offs2 = (void *)mst2 - buf2;
                if (offs2 + sz + sz <= c->leb_size)
                        goto out_err;
                mst = mst2;
        }

        ubifs_msg(c, "recovered master node from LEB %d",
                  (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

        memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);

        if (c->ro_mount) {
                /* Read-only mode. Keep a copy for switching to rw mode */
                c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
                if (!c->rcvrd_mst_node) {
                        err = -ENOMEM;
                        goto out_free;
                }
                memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);

                /*
                 * We had to recover the master node, which means there was an
                 * unclean reboot. However, it is possible that the master node
                 * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
                 * E.g., consider the following chain of events:
                 *
                 * 1. UBIFS was cleanly unmounted, so the master node is clean
                 * 2. UBIFS is being mounted R/W and starts changing the master
                 *    node in the first LEB (%UBIFS_MST_LNUM). A power cut
                 *    happens, so this LEB ends up with some amount of garbage
                 *    at the end.
                 * 3. UBIFS is being mounted R/O. We reach this place and
                 *    recover the master node from the second LEB
                 *    (%UBIFS_MST_LNUM + 1). But we cannot update the media
                 *    because we are being mounted R/O. We have to defer the
                 *    operation.
                 * 4. However, this master node (@c->mst_node) is marked as
                 *    clean (since step 1). And if we just return, the mount
                 *    code will be confused and won't recover the master node
                 *    when it is re-mounted R/W later.
                 *
                 * Thus, we force further recovery by marking the master node
                 * as dirty.
                 */
                c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
        } else {
                /* Write the recovered master node */
                c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
                err = write_rcvrd_mst_node(c, c->mst_node);
                if (err)
                        goto out_free;
        }

        vfree(buf2);
        vfree(buf1);

        return 0;

out_err:
        err = -EINVAL;
out_free:
        ubifs_err(c, "failed to recover master node");
        if (mst1) {
                ubifs_err(c, "dumping first master node");
                ubifs_dump_node(c, mst1);
        }
        if (mst2) {
                ubifs_err(c, "dumping second master node");
                ubifs_dump_node(c, mst2);
        }
        vfree(buf2);
        vfree(buf1);
        return err;
}

/**
 * ubifs_write_rcvrd_mst_node - write the recovered master node.
 * @c: UBIFS file-system description object
 *
 * This function writes the master node that was recovered during mounting in
 * read-only mode and must now be written because we are remounting rw.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
{
        int err;

        if (!c->rcvrd_mst_node)
                return 0;
        c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
        c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
        err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
        if (err)
                return err;
        kfree(c->rcvrd_mst_node);
        c->rcvrd_mst_node = NULL;
        return 0;
}

/**
 * is_last_write - determine if an offset was in the last write to a LEB.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @offs: offset to check
 *
 * This function returns %1 if @offs was in the last write to the LEB whose data
 * is in @buf, otherwise %0 is returned. The determination is made by checking
 * for subsequent empty space starting from the next @c->max_write_size
 * boundary.
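 *
 * For example (the numbers are only illustrative), with @c->max_write_size of
 * 1024 bytes and corruption found at @offs 1600: the write unit containing
 * @offs covers bytes 1024..2047, so everything from offset 2048 up to the end
 * of the LEB must be all 0xFF for the corruption to be explainable by an
 * interrupted last write.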
 */
static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
{
        int empty_offs, check_len;
        uint8_t *p;

        /*
         * Round up to the next @c->max_write_size boundary, i.e. @offs is in
         * the last wbuf written. After that there should be empty space.
         */
        empty_offs = ALIGN(offs + 1, c->max_write_size);
        check_len = c->leb_size - empty_offs;
        p = buf + empty_offs - offs;
        return is_empty(p, check_len);
}

/**
 * clean_buf - clean the data from an LEB sitting in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to clean
 * @lnum: LEB number to clean
 * @offs: offset from which to clean
 * @len: length of buffer
 *
 * This function pads up to the next min_io_size boundary (if there is one) and
 * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
 * @c->min_io_size boundary.
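 *
 * For example (the numbers are only illustrative), with @c->min_io_size of
 * 2048 bytes and *@offs at 1536: 512 bytes of padding are written at offset
 * 1536, *@offs, *@buf and *@len are advanced to the 2048 boundary, and the
 * rest of the buffer up to the end of the LEB is set to 0xff.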
 */
static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
                      int *offs, int *len)
{
        int empty_offs, pad_len;

        dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);

        ubifs_assert(c, !(*offs & 7));
        empty_offs = ALIGN(*offs, c->min_io_size);
        pad_len = empty_offs - *offs;
        ubifs_pad(c, *buf, pad_len);
        *offs += pad_len;
        *buf += pad_len;
        *len -= pad_len;
        memset(*buf, 0xff, c->leb_size - empty_offs);
}

/**
 * no_more_nodes - determine if there are no more nodes in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @len: length of buffer
 * @lnum: LEB number of the LEB from which @buf was read
 * @offs: offset from which @buf was read
 *
 * This function ensures that the corrupted node at @offs is the last thing
 * written to a LEB. This function returns %1 if more data is not found and
 * %0 if more data is found.
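 *
 * The check is done in two steps: first, assume only the common header of the
 * corrupted node was written and look for empty space after the next
 * @c->max_write_size boundary; if that fails, verify the common header, trust
 * the length it reports, skip the whole node, and again require nothing but
 * empty space after the following write-size boundary.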
 */
static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
                         int lnum, int offs)
{
        struct ubifs_ch *ch = buf;
        int skip, dlen = le32_to_cpu(ch->len);

        /* Check for empty space after the corrupt node's common header */
        skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs;
        if (is_empty(buf + skip, len - skip))
                return 1;
        /*
         * The area after the common header size is not empty, so the common
         * header must be intact. Check it.
         */
        if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) {
                dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs);
                return 0;
        }
        /* Now that we know the corrupt node's length, we can skip over it */
        skip = ALIGN(offs + dlen, c->max_write_size) - offs;
        /* After which there should be empty space */
        if (is_empty(buf + skip, len - skip))
                return 1;
        dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip);
        return 0;
}

/**
 * fix_unclean_leb - fix an unclean LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB information
 * @start: offset where scan started
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                           int start)
{
        int lnum = sleb->lnum, endpt = start;

        /* Get the end offset of the last node we are keeping */
        if (!list_empty(&sleb->nodes)) {
                struct ubifs_scan_node *snod;

                snod = list_entry(sleb->nodes.prev,
                                  struct ubifs_scan_node, list);
                endpt = snod->offs + snod->len;
        }

        if (c->ro_mount && !c->remounting_rw) {
                /* Add to recovery list */
                struct ubifs_unclean_leb *ucleb;

                dbg_rcvry("need to fix LEB %d start %d endpt %d",
                          lnum, start, sleb->endpt);
                ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
                if (!ucleb)
                        return -ENOMEM;
                ucleb->lnum = lnum;
                ucleb->endpt = endpt;
                list_add_tail(&ucleb->list, &c->unclean_leb_list);
        } else {
                /* Write the fixed LEB back to flash */
                int err;

                dbg_rcvry("fixing LEB %d start %d endpt %d",
                          lnum, start, sleb->endpt);
                if (endpt == 0) {
                        err = ubifs_leb_unmap(c, lnum);
                        if (err)
                                return err;
                } else {
                        int len = ALIGN(endpt, c->min_io_size);

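                        /*
                         * The scan may have started past offset 0 (at @start),
                         * in which case the beginning of the LEB is not in the
                         * scan buffer. Read it back so that the whole range up
                         * to the padded end point can be written back in one
                         * atomic LEB change below.
                         */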
                        if (start) {
                                err = ubifs_leb_read(c, lnum, sleb->buf, 0,
                                                     start, 1);
                                if (err)
                                        return err;
                        }
                        /* Pad to min_io_size */
                        if (len > endpt) {
                                int pad_len = len - ALIGN(endpt, 8);

                                if (pad_len > 0) {
                                        void *buf = sleb->buf + len - pad_len;

                                        ubifs_pad(c, buf, pad_len);
                                }
                        }
                        err = ubifs_leb_change(c, lnum, sleb->buf, len);
                        if (err)
                                return err;
                }
        }
        return 0;
}

/**
 * drop_last_group - drop the last group of nodes.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * group of nodes of the scanned LEB.
 */
static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
        while (!list_empty(&sleb->nodes)) {
                struct ubifs_scan_node *snod;
                struct ubifs_ch *ch;

                snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
                                  list);
                ch = snod->node;
                if (ch->group_type != UBIFS_IN_NODE_GROUP)
                        break;

                dbg_rcvry("dropping grouped node at %d:%d",
                          sleb->lnum, snod->offs);
                *offs = snod->offs;
                list_del(&snod->list);
                kfree(snod);
                sleb->nodes_cnt -= 1;
        }
}

/**
 * drop_last_node - drop the last node.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * node of the scanned LEB.
 */
static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
{
        struct ubifs_scan_node *snod;

        if (!list_empty(&sleb->nodes)) {
                snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
                                  list);

                dbg_rcvry("dropping last node at %d:%d",
                          sleb->lnum, snod->offs);
                *offs = snod->offs;
                list_del(&snod->list);
                kfree(snod);
                sleb->nodes_cnt -= 1;
        }
}

/**
 * ubifs_recover_leb - scan and recover a LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
 *         belong to any journal head)
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 * Returns the scanned information on success and a negative error code on
 * failure.
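 *
 * "Caters for errors" means that expected power-cut damage at the end of the
 * LEB (a partially written node or dirtied empty space) is dropped and the
 * remaining empty space is cleaned to 0xff; the fixed LEB is then either
 * written back straight away or, on a read-only mount, queued on
 * @c->unclean_leb_list to be fixed later.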
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                                         int offs, void *sbuf, int jhead)
{
        int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
        int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
        struct ubifs_scan_leb *sleb;
        void *buf = sbuf + offs;

        dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);

        sleb = ubifs_start_scan(c, lnum, offs, sbuf);
        if (IS_ERR(sleb))
                return sleb;

        ubifs_assert(c, len >= 8);
        while (len >= 8) {
                dbg_scan("look at LEB %d:%d (%d bytes left)",
                         lnum, offs, len);

                cond_resched();

                /*
                 * Scan quietly until there is an error from which we cannot
                 * recover
                 */
                ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
                if (ret == SCANNED_A_NODE) {
                        /* A valid node, and not a padding node */
                        struct ubifs_ch *ch = buf;
                        int node_len;

                        err = ubifs_add_snod(c, sleb, buf, offs);
                        if (err)
                                goto error;
                        node_len = ALIGN(le32_to_cpu(ch->len), 8);
                        offs += node_len;
                        buf += node_len;
                        len -= node_len;
                } else if (ret > 0) {
                        /* Padding bytes or a valid padding node */
                        offs += ret;
                        buf += ret;
                        len -= ret;
                } else if (ret == SCANNED_A_CORRUPT_NODE) {
                        dbg_rcvry("found corruption (%d) at %d:%d",
                                  ret, lnum, offs);
                        if (ubifs_check_node(c, buf, lnum, offs, 1, 1) == -EUCLEAN &&
                            !no_more_nodes(c, buf, len, lnum, offs)) {
                                int skip;
                                struct ubifs_ch *ch = buf;

                                /*
                                 * If the flash voltage drops suddenly during
                                 * programming, the flash may write abnormal
                                 * data while operating at low voltage, and the
                                 * last data written should be discarded.
                                 */
                                ubifs_msg(c, "recovery corrupt node");
                                skip = ALIGN(offs + le32_to_cpu(ch->len), c->max_write_size) - offs;
                                memset(buf + skip, 0xff, len - skip);
                        }

                        break;
                } else if (ret == SCANNED_EMPTY_SPACE) {
                        dbg_rcvry("found corruption (%d) at %d:%d",
                                  ret, lnum, offs);
                        if (!is_empty(buf, len) && !is_last_write(c, buf, offs)) {
                                /*
                                 * If the flash voltage drops suddenly during
                                 * programming, data may end up programmed to
                                 * the wrong page while the flash operates at
                                 * low voltage, and that data should be
                                 * discarded.
                                 */
                                ubifs_msg(c, "recovery empty space");
                                memset(buf, 0xff, len);
                        }

                        break;
                } else if (ret == SCANNED_GARBAGE ||
                           ret == SCANNED_A_BAD_PAD_NODE) {
                        dbg_rcvry("found corruption (%d) at %d:%d",
                                  ret, lnum, offs);
                        break;
                } else {
                        ubifs_err(c, "unexpected return value %d", ret);
                        err = -EINVAL;
                        goto error;
                }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (!is_last_write(c, buf, offs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) goto corrupted_rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) } else if (ret == SCANNED_A_CORRUPT_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (!no_more_nodes(c, buf, len, lnum, offs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) goto corrupted_rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) } else if (!is_empty(buf, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (!is_last_write(c, buf, offs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int corruption = first_non_ff(buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * See header comment for this file for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * explanations about the reasons we have this check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) lnum, offs, corruption);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* Make sure we dump interesting non-0xFF data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) offs += corruption;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) buf += corruption;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) goto corrupted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) min_io_unit = round_down(offs, c->min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (grouped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * If nodes are grouped, always drop the incomplete group at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) drop_last_group(sleb, &offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (jhead == GCHD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * If this LEB belongs to the GC head then while we are in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * middle of the same min. I/O unit keep dropping nodes. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * basically, what we want is to make sure that the last min.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * I/O unit where we saw the corruption is dropped completely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * with all the uncorrupted nodes which may possibly sit there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * In other words, let's name the min. I/O unit where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * corruption starts B, and the previous min. I/O unit A. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * below code tries to deal with a situation when half of B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * contains valid nodes or the end of a valid node, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * second half of B contains corrupted data or garbage. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * means that UBIFS had been writing to B just before the power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * cut happened. I do not know how realistic is this scenario
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * that half of the min. I/O unit had been written successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * and the other half not, but this is possible in our 'failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * mode emulation' infrastructure at least.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * So what is the problem, why we need to drop those nodes? Why
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * can't we just clean-up the second half of B by putting a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * padding node there? We can, and this works fine with one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * exception which was reproduced with power cut emulation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * testing and happens extremely rarely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * Imagine the file-system is full, we run GC which starts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * the current GC head LEB). The @c->gc_lnum is -1, which means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * that GC will retain LEB X and will try to continue. Imagine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * that LEB X is currently the dirtiest LEB, and the amount of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * used space in LEB Y is exactly the same as amount of free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * space in LEB X.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * And a power cut happens when nodes are moved from LEB X to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * LEB Y. We are here trying to recover LEB Y which is the GC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * head LEB. We find the min. I/O unit B as described above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * Then we clean up LEB Y by padding min. I/O unit B. And later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * does not match because the amount of valid nodes there does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * not fit the free space in LEB Y any more! And this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * because of the padding node which we added to LEB Y. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * user-visible effect of this, which I once observed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * analysed, is that mounting the file-system fails with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * -ENOSPC error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * So obviously, to make sure that situation does not happen we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * should free min. I/O unit B in LEB Y completely and the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * used min. I/O unit in LEB Y should be A. This is basically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * what the below code tries to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) while (offs > min_io_unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) drop_last_node(sleb, &offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) buf = sbuf + offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) len = c->leb_size - offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) clean_buf(c, &buf, lnum, &offs, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ubifs_end_scan(c, sleb, lnum, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) err = fix_unclean_leb(c, sleb, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return sleb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) corrupted_rescan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Re-scan the corrupted data with verbose messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ubifs_err(c, "corruption %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) corrupted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ubifs_scanned_corruption(c, lnum, offs, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) err = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ubifs_err(c, "LEB %d scanning failed", lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ubifs_scan_destroy(sleb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * get_cs_sqnum - get commit start sequence number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * @lnum: LEB number of commit start node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @offs: offset of commit start node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * @cs_sqnum: commit start sequence number is returned here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned long long *cs_sqnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct ubifs_cs_node *cs_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int err, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dbg_rcvry("at %d:%d", lnum, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!cs_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) goto out_err;
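/*
 * Read the commit start node; a read error reported as -EBADMSG (an
 * uncorrectable ECC error) is tolerated here because the node contents
 * are fully validated by 'ubifs_scan_a_node()' below in any case.
 */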
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) err = ubifs_leb_read(c, lnum, (void *)cs_node, offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) UBIFS_CS_NODE_SZ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (err && err != -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (ret != SCANNED_A_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ubifs_err(c, "Not a valid node");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (cs_node->ch.node_type != UBIFS_CS_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) ubifs_err(c, "Not a CS node, type is %d", cs_node->ch.node_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) (unsigned long long)le64_to_cpu(cs_node->cmt_no),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) c->cmt_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) kfree(cs_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ubifs_err(c, "failed to get CS sqnum");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) kfree(cs_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * ubifs_recover_log_leb - scan and recover a log LEB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * @lnum: LEB number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * @offs: offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @sbuf: LEB-sized buffer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * This function does a scan of a LEB, but caters for errors that might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * been caused by unclean reboots from which we are attempting to recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * (assume that only the last log LEB can be corrupted by an unclean reboot).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * This function returns the scanned information on success and an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * pointer on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int offs, void *sbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct ubifs_scan_leb *sleb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int next_lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) dbg_rcvry("LEB %d", lnum);
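/*
 * The log occupies LEBs %UBIFS_LOG_LNUM to %UBIFS_LOG_LNUM + @c->log_lebs - 1
 * and is used as a circular buffer, so the LEB after the last log LEB wraps
 * around to the first one.
 */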
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) next_lnum = lnum + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) next_lnum = UBIFS_LOG_LNUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (next_lnum != c->ltail_lnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * We can only recover at the end of the log, so check that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * next log LEB is empty or out of date.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (IS_ERR(sleb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return sleb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (sleb->nodes_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct ubifs_scan_node *snod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned long long cs_sqnum = c->cs_sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) snod = list_entry(sleb->nodes.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct ubifs_scan_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (cs_sqnum == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ubifs_scan_destroy(sleb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
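/*
 * The first node in the next log LEB is newer than the commit
 * start node, so the next LEB is neither empty nor out of date
 * and this LEB cannot be recovered as the end of the log.
 */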
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (snod->sqnum > cs_sqnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ubifs_err(c, "unrecoverable log corruption in LEB %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ubifs_scan_destroy(sleb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return ERR_PTR(-EUCLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ubifs_scan_destroy(sleb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * recover_head - recover a head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * @lnum: LEB number of head to recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * @offs: offset of head to recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * @sbuf: LEB-sized buffer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * This function ensures that there is no data on the flash at a head location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int len = c->max_write_size, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (offs + len > c->leb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) len = c->leb_size - offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Read at the head location and check it is empty flash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (err || !is_empty(sbuf, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dbg_rcvry("cleaning head at %d:%d", lnum, offs);
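/*
 * If the head is at the very beginning of the LEB, unmapping the
 * LEB leaves it empty. Otherwise preserve the data before the head
 * and write it back with an atomic LEB change, which discards
 * everything at and after the head position.
 */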
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (offs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return ubifs_leb_unmap(c, lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return ubifs_leb_change(c, lnum, sbuf, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * ubifs_recover_inl_heads - recover index and LPT heads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @sbuf: LEB-sized buffer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * This function ensures that there is no data on the flash at the index and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * LPT head locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * This deals with the recovery of a half-completed journal commit. UBIFS is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * careful never to overwrite the last version of the index or the LPT. Because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * the index and LPT are wandering trees, data from a half-completed commit will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * assumed to be empty and will be unmapped anyway before use, or in the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * and LPT heads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ubifs_assert(c, !c->ro_mount || c->remounting_rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * clean_an_unclean_leb - read and write a LEB to remove corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * @ucleb: unclean LEB information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * @sbuf: LEB-sized buffer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * This function reads a LEB up to a point pre-determined by the mount recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * checks the nodes, and writes the result back to the flash, thereby cleaning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * off any following corruption or non-fatal ECC errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int clean_an_unclean_leb(struct ubifs_info *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct ubifs_unclean_leb *ucleb, void *sbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) void *buf = sbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dbg_rcvry("LEB %d len %d", lnum, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* Nothing to read, just unmap it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return ubifs_leb_unmap(c, lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
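/*
 * Read the whole region that is going to be kept; an ECC error
 * (-EBADMSG) is not fatal here because every node is re-validated by
 * the scan below and the cleaned data is written back to the flash
 * afterwards.
 */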
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (err && err != -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) while (len >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* Scan quietly until there is an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (ret == SCANNED_A_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* A valid node, and not a padding node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct ubifs_ch *ch = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int node_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) node_len = ALIGN(le32_to_cpu(ch->len), 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) offs += node_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) buf += node_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) len -= node_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Padding bytes or a valid padding node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) offs += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) buf += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ret == SCANNED_EMPTY_SPACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ubifs_err(c, "unexpected empty space at %d:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) lnum, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (quiet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Redo the last scan but noisily */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) quiet = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ubifs_scanned_corruption(c, lnum, offs, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* Pad to min_io_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) len = ALIGN(ucleb->endpt, c->min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (len > ucleb->endpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int pad_len = len - ALIGN(ucleb->endpt, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (pad_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) buf = c->sbuf + len - pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ubifs_pad(c, buf, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
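/*
 * Example with hypothetical numbers: if @ucleb->endpt is 2000 and
 * @c->min_io_size is 2048, then len becomes 2048 and pad_len is
 * 2048 - ALIGN(2000, 8) = 48, so the last 48 bytes of the buffer are
 * filled with padding before the atomic write-back below.
 */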
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Write back the LEB atomically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) err = ubifs_leb_change(c, lnum, sbuf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dbg_rcvry("cleaned LEB %d", lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * @sbuf: LEB-sized buffer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * This function cleans the LEBs identified during recovery that need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * written back but could not be because UBIFS was mounted read-only. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * called when remounting to read-write mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) dbg_rcvry("recovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) while (!list_empty(&c->unclean_leb_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct ubifs_unclean_leb *ucleb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ucleb = list_entry(c->unclean_leb_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct ubifs_unclean_leb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err = clean_an_unclean_leb(c, ucleb, sbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) list_del(&ucleb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) kfree(ucleb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * grab_empty_leb - grab an empty LEB to use as GC LEB and run commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * This is a helper function for 'ubifs_rcvry_gc_commit()' which grabs an empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * LEB to be used as GC LEB (@c->gc_lnum), and then runs the commit. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * zero in case of success and a negative error code in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static int grab_empty_leb(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int lnum, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Note, it is very important to first search for an empty LEB and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * run the commit, not vice-versa. The reason is that there might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * only one empty LEB at the moment, the one which has been the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * @c->gc_lnum just before the power cut happened. During the regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * UBIFS operation (not now) @c->gc_lnum is marked as "taken", so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * one but GC can grab it. But at this moment this single empty LEB is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * not marked as taken, so if we run commit - what happens? Right, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * commit will grab it and write the index there. Remember that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * index always expands as long as there is free space, and it only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * starts consolidating when we run out of space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * IOW, if we run commit now, we might not be able to find a free LEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) lnum = ubifs_find_free_leb_for_idx(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (lnum < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) ubifs_err(c, "could not find an empty LEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ubifs_dump_lprops(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ubifs_dump_budg(c, &c->bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
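/*
 * Note: 'ubifs_find_free_leb_for_idx()' marks the LEB it returns as an
 * index LEB, but this LEB is going to be used as @c->gc_lnum instead,
 * so the index flag has to be cleared again.
 */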
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* Reset the index flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) LPROPS_INDEX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) c->gc_lnum = lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) dbg_rcvry("found empty LEB %d, run commit", lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return ubifs_run_commit(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * Out-of-place garbage collection always requires one empty LEB with which to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * start garbage collection. The LEB number is recorded in @c->gc_lnum and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * written to the master node on unmounting. In the case of an unclean unmount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * the value of gc_lnum recorded in the master node is out of date and cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * be used. Instead, recovery must allocate an empty LEB for this purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * However, there may not be enough empty space, in which case it must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * possible to GC the dirtiest LEB into the GC head LEB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * This function also runs the commit which causes the TNC updates from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * size-recovery and orphans to be written to the flash. That is important to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * ensure correct replay order for subsequent mounts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int ubifs_rcvry_gc_commit(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct ubifs_lprops lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dbg_rcvry("GC head LEB %d, offs %d", wbuf->lnum, wbuf->offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) c->gc_lnum = -1;
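/*
 * If there is no GC head LEB or it has no free space left, nothing can
 * be garbage-collected into it, so just grab a fresh empty LEB instead.
 */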
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (wbuf->lnum == -1 || wbuf->offs == c->leb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return grab_empty_leb(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
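/*
 * Find a dirty LEB with at least @wbuf->offs bytes of free plus dirty
 * space: its remaining live data is then guaranteed to fit into the
 * space still available in the GC head LEB (see the assertion below).
 */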
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (err != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) dbg_rcvry("could not find a dirty LEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return grab_empty_leb(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ubifs_assert(c, !(lp.flags & LPROPS_INDEX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ubifs_assert(c, lp.free + lp.dirty >= wbuf->offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * We run the commit before garbage collection, otherwise subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * mounts will see the GC and orphan deletion in a different order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) dbg_rcvry("committing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) err = ubifs_run_commit(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
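/*
 * Garbage-collect the dirty LEB into the GC head while holding the GC
 * head write-buffer mutex, and sync the write-buffer before dropping
 * the mutex so that the moved nodes reach the flash.
 */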
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dbg_rcvry("GC'ing LEB %d", lp.lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) err = ubifs_garbage_collect_leb(c, &lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int err2 = ubifs_wbuf_sync_nolock(wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) mutex_unlock(&wbuf->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ubifs_err(c, "GC failed, error %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ubifs_assert(c, err == LEB_RETAINED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (err != LEB_RETAINED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err = ubifs_leb_unmap(c, c->gc_lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dbg_rcvry("allocated LEB %d for GC", lp.lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * struct size_entry - inode size information for recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * @rb: link in the RB-tree of sizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * @inum: inode number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * @i_size: size on inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * @d_size: maximum size based on data nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * @exists: indicates whether the inode exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * @inode: inode if pinned in memory awaiting rw mode to fix it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct size_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct rb_node rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ino_t inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) loff_t d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) int exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) };
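
/*
 * Example with made-up numbers: replaying a data node of inode 100 which
 * ends at offset 12288 adds an entry with @d_size 12288 and @exists 0;
 * replaying an inode node of inode 100 carrying size 8192 then sets
 * @i_size to 8192 and @exists to 1. Because @d_size > @i_size, recovery
 * later grows the inode size to 12288.
 */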
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * add_ino - add an entry to the size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * @inum: inode number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * @i_size: size on inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @d_size: maximum size based on data nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @exists: indicates whether the inode exists
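*
* This function returns %0 on success and a negative error code on failure.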
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) loff_t d_size, int exists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct size_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
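/*
 * Walk down to a leaf position. Equal keys are not expected here
 * because the callers look the inode up with 'find_ino()' before
 * adding it.
 */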
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) e = rb_entry(parent, struct size_entry, rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (inum < e->inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) e->inum = inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) e->i_size = i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) e->d_size = d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) e->exists = exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) rb_link_node(&e->rb, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) rb_insert_color(&e->rb, &c->size_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * find_ino - find an entry on the size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * @inum: inode number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct rb_node *p = c->size_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct size_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) while (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) e = rb_entry(p, struct size_entry, rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (inum < e->inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) p = p->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) else if (inum > e->inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) p = p->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * remove_ino - remove an entry from the size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @inum: inode number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static void remove_ino(struct ubifs_info *c, ino_t inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct size_entry *e = find_ino(c, inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) rb_erase(&e->rb, &c->size_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * ubifs_destroy_size_tree - free resources related to the size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) void ubifs_destroy_size_tree(struct ubifs_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct size_entry *e, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) iput(e->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) c->size_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * ubifs_recover_size_accum - accumulate inode sizes for recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * @key: node key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * @deletion: node is for a deletion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * @new_size: inode size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * This function has two purposes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * 1) to ensure there are no data nodes that fall outside the inode size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * 2) to ensure there are no data nodes for inodes that do not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * To accomplish those purposes, an rb-tree is constructed containing an entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * for each inode number in the journal that has not been deleted, and recording
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * the size from the inode node, the maximum size of any data node (also altered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * by truncations) and a flag indicating an inode number for which no inode node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * was present in the journal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * Note that there is still the possibility that there are data nodes that have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * been committed that are beyond the inode size, however the only way to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * them would be to scan the entire index. Alternatively, some provision could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * be made to record the size of inodes at the start of commit, which would seem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * very cumbersome for a scenario that is quite unlikely and the only negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * consequence of which is wasted space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) int deletion, loff_t new_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ino_t inum = key_inum(c, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct size_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) switch (key_type(c, key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case UBIFS_INO_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (deletion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) remove_ino(c, inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) e = find_ino(c, inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) e->i_size = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) e->exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) err = add_ino(c, inum, new_size, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) case UBIFS_DATA_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) e = find_ino(c, inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (new_size > e->d_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) e->d_size = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) err = add_ino(c, inum, 0, new_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case UBIFS_TRUN_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) e = find_ino(c, inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) e->d_size = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * fix_size_in_place - fix inode size in place on flash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * @e: inode size information for recovery
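*
* This function returns %0 on success and a negative error code on failure.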
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct ubifs_ino_node *ino = c->sbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) unsigned char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) union ubifs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int err, lnum, offs, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) uint32_t crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* Locate the inode node LEB number and offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ino_key_init(c, &key, e->inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * If the size recorded on the inode node is greater than the size that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * was calculated from nodes in the journal, then don't change the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) i_size = le64_to_cpu(ino->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (i_size >= e->d_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* Read the LEB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* Change the size field and recalculate the CRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ino = c->sbuf + offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ino->size = cpu_to_le64(e->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) len = le32_to_cpu(ino->ch.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) crc = crc32(UBIFS_CRC32_INIT, (void *)ino + 8, len - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ino->ch.crc = cpu_to_le32(crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* Work out where data in the LEB ends and free space begins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) p = c->sbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) len = c->leb_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) while (p[len] == 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) len -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) len = ALIGN(len + 1, c->min_io_size);
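/*
 * For example (hypothetical sizes): if the last non-0xFF byte is at
 * offset 5000 and @c->min_io_size is 2048, len becomes
 * ALIGN(5001, 2048) = 6144, so only the first 6144 bytes of the LEB
 * are written back below.
 */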
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* Atomically write the fixed LEB back again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) err = ubifs_leb_change(c, lnum, c->sbuf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) (unsigned long)e->inum, e->i_size, e->d_size, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * inode_fix_size - fix inode size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * @e: inode size information for recovery
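*
* This function returns %0 on success and a negative error code on failure.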
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static int inode_fix_size(struct ubifs_info *c, struct size_entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct ubifs_inode *ui;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (c->ro_mount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ubifs_assert(c, !e->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (e->inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Remounting rw, pick up inode we stored earlier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) inode = e->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) inode = ubifs_iget(c->vfs_sb, e->inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (IS_ERR(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (inode->i_size >= e->d_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * The original inode in the index already has a size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * big enough, nothing to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dbg_rcvry("ino %lu size %lld -> %lld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) (unsigned long)e->inum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) inode->i_size, e->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ui = ubifs_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) inode->i_size = e->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) ui->ui_size = e->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ui->synced_i_size = e->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) e->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * In read-only mode just keep the inode pinned in memory until we go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * read-write. In read-write mode write the inode to the journal with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * fixed size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (c->ro_mount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) err = ubifs_jnl_write_inode(c, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) rb_erase(&e->rb, &c->size_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * ubifs_recover_size - recover inode size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * @c: UBIFS file-system description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * @in_place: If true, do an in-place size fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * This function attempts to fix inode size discrepancies identified by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * 'ubifs_recover_size_accum()' function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * This function returns %0 on success and a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
int ubifs_recover_size(struct ubifs_info *c, bool in_place)
{
	struct rb_node *this = rb_first(&c->size_tree);

	while (this) {
		struct size_entry *e;
		int err;

		e = rb_entry(this, struct size_entry, rb);

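		/*
		 * Advance the iterator now, because 'e' may be erased from
		 * @c->size_tree and freed below.
		 */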
		this = rb_next(this);

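		/*
		 * 'e->exists' is set if an inode node for this inode was seen
		 * while the sizes were accumulated. If it was not, look the
		 * inode up in the index; if it is not there either, the data
		 * nodes are orphaned and are removed below.
		 */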
		if (!e->exists) {
			union ubifs_key key;

			ino_key_init(c, &key, e->inum);
			err = ubifs_tnc_lookup(c, &key, c->sbuf);
			if (err && err != -ENOENT)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
				dbg_rcvry("removing ino %lu",
					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
			} else {
				struct ubifs_ino_node *ino = c->sbuf;

				e->exists = 1;
				e->i_size = le64_to_cpu(ino->size);
			}
		}

		if (e->exists && e->i_size < e->d_size) {
			ubifs_assert(c, !(c->ro_mount && in_place));

			/*
			 * We found data beyond the inode size recorded in the
			 * inode node, so the inode size has to be fixed up.
			 */

			if (in_place) {
				err = fix_size_in_place(c, e);
				if (err)
					return err;
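				/*
				 * Drop the inode reference which may have been
				 * pinned by an earlier read-only mount (iput()
				 * of NULL is a no-op).
				 */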
				iput(e->inode);
			} else {
				err = inode_fix_size(c, e);
				if (err)
					return err;
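				/*
				 * In the read-write case inode_fix_size() has
				 * already erased and freed the entry, and in
				 * the read-only case the entry (and the pinned
				 * inode) must survive until remount, so skip
				 * the removal below.
				 */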
				continue;
			}
		}

		rb_erase(&e->rb, &c->size_tree);
		kfree(e);
	}

	return 0;
}
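
/*
 * Usage sketch (illustrative only, not the actual super.c code): size
 * recovery is driven from the mount and remount-to-read-write paths in
 * super.c. Assuming @c->need_recovery was set because the master node showed
 * an unclean un-mount, the call is roughly:
 *
 *	if (c->need_recovery) {
 *		err = ubifs_recover_size(c, !c->ro_mount);
 *		if (err)
 *			return err;
 *	}
 *
 * With a read-only mount nothing is written: the entries stay in
 * @c->size_tree (and the corresponding inodes stay pinned in memory) until
 * the file-system is re-mounted read-write, at which point the sizes are
 * fixed up for real. The exact call sites and the @in_place argument they
 * pass live in super.c; the snippet above only sketches the idea.
 */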