/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "summary.h"
#include "debug.h"

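/* Upper bound (in bytes) on how much of the start of an eraseblock is checked
 * for 0xFF before the block is declared empty; see EMPTY_SCAN_SIZE() below. */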
#define DEFAULT_EMPTY_SCAN_SIZE 256

#define noisy_printk(noise, fmt, ...) \
do { \
	if (*(noise)) { \
		pr_notice(fmt, ##__VA_ARGS__); \
		(*(noise))--; \
		if (!(*(noise))) \
			pr_notice("Further such events for this erase block will not be printed\n"); \
	} \
} while (0)

static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);

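/* Smallest amount of free space a block must have to be worth keeping as the
 * block we write to next: room for a couple of raw inode nodes, or a full
 * write-buffer page on flash where nodes cannot simply be marked obsolete
 * in place. */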
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;

}

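/* How much of the start of an eraseblock to check for 0xFF before declaring
 * it empty, capped at the sector size for very small eraseblocks. */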
static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}

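/* Account all remaining free space in 'jeb' as dirty, fold any wasted space
 * into the dirty total, and put the block on the dirty or very_dirty list. */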
static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	int ret;

	if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
		return ret;
	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
		return ret;
	/* Turn wasted size into dirty, since we apparently
	   think it's recoverable now. */
	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
	return 0;
}

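/* Scan the whole medium at mount time. Where the MTD driver allows it the
 * flash is mapped directly with mtd_point(); otherwise a read buffer is
 * allocated. Every eraseblock is scanned and classified, the best candidate
 * is kept as c->nextblock for new writes, and empty or unformatted blocks
 * are queued for erasure. */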
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen, try_size;

	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
			(void **)&flashbuf, NULL);
	if (!ret && pointlen < c->mtd->size) {
		/* Don't muck about if it won't let us point to the whole flash */
		jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
			  pointlen);
		mtd_unpoint(c->mtd, 0, pointlen);
		flashbuf = NULL;
	}
	if (ret && ret != -EOPNOTSUPP)
		jffs2_dbg(1, "MTD point failed %d\n", ret);
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			try_size = c->sector_size;
		else
			try_size = PAGE_SIZE;

		jffs2_dbg(1, "Trying to allocate readbuf of %zu "
			  "bytes\n", try_size);

		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
		if (!flashbuf)
			return -ENOMEM;

		jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
			  try_size);

		buf_size = (uint32_t)try_size;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			ret = -ENOMEM;
			goto out_buf;
		}
	}

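	/* Scan each eraseblock in turn. jffs2_scan_eraseblock() returns either
	 * a negative error or a BLK_STATE_* classification, which decides
	 * which list the block ends up on. */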
	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
						buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
					  jeb->offset);
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with the most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						goto out;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
					  __func__, jeb->offset);
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					goto out;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
				  jeb->offset);
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			pr_warn("%s(): unknown block state\n", __func__);
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  __func__, skip);
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if (!c->used_size && !c->unchecked_size &&
			((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
			pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
				  empty_blocks, bad_blocks, c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		spin_lock(&c->erase_completion_lock);
		jffs2_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
	}
	ret = 0;
 out:
	jffs2_sum_reset_collected(s);
	kfree(s);
 out_buf:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
	return ret;
}

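/* Read 'len' bytes at flash offset 'ofs' into 'buf'. A short read is treated
 * as an I/O error, since the scan code relies on getting all it asked for. */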
static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
			       uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
			  len, ofs, ret);
		return ret;
	}
	if (retlen < len) {
		jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
			  ofs, retlen);
		return -EIO;
	}
	return 0;
}

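/* Classify a freshly scanned eraseblock from its space accounting: just a
 * cleanmarker, clean (at most a trivial amount of dirt), partly dirty, or
 * containing nothing valid at all. */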
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with at most 4 bytes of dirty space to the clean list */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

#ifdef CONFIG_JFFS2_FS_XATTR
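/* Scan a single xattr datum node: verify its node CRC and length, then record
 * it against its xdatum. If the xdatum already carries a newer version, the
 * node is merely chained onto the existing ref list; otherwise the xdatum is
 * updated to describe this node. */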
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t xid, version, totlen, crc;
	int err;

	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

	totlen = PAD(sizeof(struct jffs2_raw_xattr)
			+ rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xd = jffs2_setup_xattr_datum(c, xid, version);
	if (IS_ERR(xd))
		return PTR_ERR(xd);

	if (xd->version > version) {
		struct jffs2_raw_node_ref *raw
			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;
	} else {
		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
	}

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}

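/* Scan a single xattr reference node: verify its node CRC and length, then
 * queue a jffs2_xattr_ref on c->xref_temp. The refs collected here are wired
 * up to their inodes later, in jffs2_build_xattr_subsystem(). */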
static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* BEFORE jffs2_build_xattr_subsystem() is called,
	 * and AFTER an xattr_ref is marked as a dead xref,
	 * ref->xid is used to store the 32-bit xid and xd is not used;
	 * ref->ino is used to store the 32-bit inode number and ic is not used.
	 * Those fields are declared as a union, so their uses are mutually
	 * exclusive. In a similar way, ref->next is temporarily used to chain
	 * all xattr_ref objects together; they are re-chained onto the
	 * jffs2_inode_cache correctly in jffs2_build_xattr_subsystem().
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
#endif

/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
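/* Returns a BLK_STATE_* classification (>= 0) on success or a negative error;
   see the switch in jffs2_scan_medium() for how the result is used. */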
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs, max_ofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;


#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret;

		if (mtd_block_isbad(c->mtd, jeb->offset))
			return BLK_STATE_BADBLOCK;

		ret = jffs2_check_nand_cleanmarker(c, jeb);
		jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);

		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0: cleanmarkerfound = 1; break;
		case 1: break;
		default: return ret;
		}
	}
#endif

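	/* Fast path: if erase-block summaries are enabled, look for a summary
	 * marker at the very end of the block and, if one is present, parse
	 * the summary node instead of scanning every node individually. */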
	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* sm->offset may be wrong, but the MAGIC may still be right */
				if (sumlen > c->sector_size)
					goto full_scan;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err) {
						if (sumlen > buf_size)
							kfree(sumptr);
						return err;
					}
				}
			}

		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

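	/* No usable summary was found (or summaries are disabled): fall back
	 * to scanning the block contents node by node. */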
 full_scan:
	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
	/* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
	while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == max_ofs) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
				  ret);
			switch (ret) {
			case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1: return BLK_STATE_ALLDIRTY;
			default: return ret;
			}
		}
#endif
		jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
			  jeb->offset);
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
	}
	if (ofs) {
		jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs);
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);

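	/* Walk the rest of the block node by node. Every pass through the loop
	 * either consumes one node (linking a raw node ref for it) or marks a
	 * small stretch of unparseable space as dirty, so 'ofs' always moves
	 * forward. */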
scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			pr_warn("ofs 0x%08x has already been seen. Skipping\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
				  sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs,
				  sizeof(*node));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node),
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start, scan_end;

			empty_start = ofs;
			ofs += 4;
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);

			jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < scan_end) {
				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
					pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
						empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
				  ofs);

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
					  EMPTY_SCAN_SIZE(c->sector_size));
				return BLK_STATE_CLEANMARKER;
			}
			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
				scan_end = buf_len;
				goto more_empty;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start);
				break;
			}
			/* point never reaches here */
			scan_end = buf_len;
			jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
			pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     __func__,
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x} has invalid CRC 0x%08x (calculated 0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ofs, je16_to_cpu(node->magic),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) je16_to_cpu(node->nodetype),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) je32_to_cpu(node->totlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) je32_to_cpu(node->hdr_crc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) hdr_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ofs += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* Eep. Node goes over the end of the erase block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ofs, je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) pr_warn("Perhaps the file system was created with the wrong erase size?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ofs += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Wheee. This is an obsoleted node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
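		/*
		 * Editor's note, for illustration only: PAD() (from nodelist.h)
		 * rounds a length up to the next 4-byte boundary, roughly
		 *
		 *	#define PAD(x)	(((x) + 3) & ~3)
		 *
		 * so a 13-byte node is accounted for as 16 bytes of flash here
		 * and in the cases below.
		 */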
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) switch (je16_to_cpu(node->nodetype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) case JFFS2_NODETYPE_INODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) jffs2_dbg(1, "Fewer than %zu bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) sizeof(struct jffs2_raw_inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) buf_len, ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) buf_ofs = ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) node = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (err) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) case JFFS2_NODETYPE_DIRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) je32_to_cpu(node->totlen), buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) buf_ofs = ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) node = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (err) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #ifdef CONFIG_JFFS2_FS_XATTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) case JFFS2_NODETYPE_XATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) je32_to_cpu(node->totlen), buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) buf_ofs = ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) node = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case JFFS2_NODETYPE_XREF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) je32_to_cpu(node->totlen), buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) buf_ofs = ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) node = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #endif /* CONFIG_JFFS2_FS_XATTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) case JFFS2_NODETYPE_CLEANMARKER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ofs, je32_to_cpu(node->totlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) c->cleanmarker_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ofs += PAD(sizeof(struct jffs2_unknown_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) } else if (jeb->first_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ofs, jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ofs += PAD(sizeof(struct jffs2_unknown_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ofs += PAD(c->cleanmarker_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) break;
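		/*
		 * Editor's note: a cleanmarker is a tiny node written right
		 * after a successful erase, so that a block which lost power
		 * part way through erasing is not mistaken for clean free
		 * space. Hence the checks above: an unexpected size, or a
		 * cleanmarker that is not the first node in the block, just
		 * gets treated as dirty space.
		 */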
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) case JFFS2_NODETYPE_PADDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (jffs2_sum_active())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
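		/*
		 * Editor's note: node types this kernel does not understand are
		 * classified by the JFFS2_COMPAT_MASK bits of the nodetype, as
		 * handled below: ROCOMPAT forces a read-only mount, INCOMPAT
		 * aborts the mount, RWCOMPAT_DELETE lets the node be treated as
		 * dirty space, and RWCOMPAT_COPY keeps the node so that GC will
		 * copy it verbatim.
		 */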
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) case JFFS2_FEATURE_ROCOMPAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) je16_to_cpu(node->nodetype), ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) c->flags |= JFFS2_SB_FLAG_RO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!(jffs2_is_readonly(c)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) case JFFS2_FEATURE_INCOMPAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) je16_to_cpu(node->nodetype), ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) case JFFS2_FEATURE_RWCOMPAT_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) je16_to_cpu(node->nodetype), ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) case JFFS2_FEATURE_RWCOMPAT_COPY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) je16_to_cpu(node->nodetype), ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* We can't summarise nodes we don't grok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) jffs2_sum_disable_collecting(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ofs += PAD(je32_to_cpu(node->totlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (jffs2_sum_active()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) dbg_summary("There is not enough space for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) "summary information, disabling for this jeb!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) jffs2_sum_disable_collecting(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
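	/*
	 * Editor's note: the summary collected during this scan is meant to
	 * be written near the end of the block (see summary.c), so that a
	 * later mount can read one summary node per eraseblock instead of
	 * walking every node header again; if the block no longer has room
	 * for the summary frame, collection is simply abandoned, as above.
	 */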
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) jeb->offset, jeb->free_size, jeb->dirty_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* jffs2_mark_node_obsolete() can add to wasted_size even during the scan, so fold it into dirty space before classifying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (jeb->wasted_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) jeb->dirty_size += jeb->wasted_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) c->dirty_size += jeb->wasted_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) c->wasted_size -= jeb->wasted_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) jeb->wasted_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return jffs2_scan_classify_jeb(c, jeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
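/*
 * Editor's note: jffs2_scan_classify_jeb() (defined earlier in this file)
 * uses the free/dirty/used totals accumulated above to pick which of the
 * superblock's block lists this erase block lands on (free, clean, dirty,
 * erase-pending and so on), which in turn drives later allocation and
 * garbage collection.
 */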
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct jffs2_inode_cache *ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ic = jffs2_get_ino_cache(c, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (ino > c->highest_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) c->highest_ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ic = jffs2_alloc_inode_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) pr_notice("%s(): allocation of inode cache failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) memset(ic, 0, sizeof(*ic));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ic->ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ic->nodes = (void *)ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) jffs2_add_ino_cache(c, ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (ino == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ic->pino_nlink = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
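/*
 * Editor's note: this is a get-or-create helper used by the inode and dirent
 * scanners below. Pointing ic->nodes back at the cache itself appears to be
 * the empty-list terminator for the raw node ref chain, and inode #1 is the
 * root directory, which gets an implicit link count of one since no dirent
 * ever refers to it.
 */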
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct jffs2_inode_cache *ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) uint32_t crc, ino = je32_to_cpu(ri->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* We do very little here now. Just check the ino# to which we should attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) this node; we can do all the CRC checking etc. later. There's a tradeoff here --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) we used to scan the flash once only, reading everything we want from it into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) memory, then building all our in-core data structures and freeing the extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) information. Now we allow the first part of the mount to complete a lot quicker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) but we have to go _back_ to the flash in order to finish the CRC checking, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) Which means that the _full_ amount of time to get to proper write mode with GC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) operational may actually be _longer_ than before. Sucks to be me. */
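	/*
	 * Editor's note: concretely, the data CRC is not verified here; the
	 * node is linked below as REF_UNCHECKED and its payload is checked
	 * later, when the inode is first read back or garbage collected.
	 * That is the "go back to the flash" cost described above.
	 */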
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Check the node CRC in any case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) crc = crc32(0, ri, sizeof(*ri)-8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (crc != je32_to_cpu(ri->node_crc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) __func__, ofs, je32_to_cpu(ri->node_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * We believe totlen because the CRC on the node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * _header_ was OK, just the node itself failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return jffs2_scan_dirty_space(c, jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) PAD(je32_to_cpu(ri->totlen)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ic = jffs2_get_ino_cache(c, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ic = jffs2_scan_make_ino_cache(c, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (!ic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* Wheee. It worked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) je32_to_cpu(ri->offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) pseudo_random += je32_to_cpu(ri->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (jffs2_sum_active()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct jffs2_full_dirent *fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct jffs2_inode_cache *ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) uint32_t checkedlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) uint32_t crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* We don't get here unless the node is still valid, so we don't have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mask in the ACCURATE bit any more. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) crc = crc32(0, rd, sizeof(*rd)-8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (crc != je32_to_cpu(rd->node_crc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) __func__, ofs, je32_to_cpu(rd->node_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) pseudo_random += je32_to_cpu(rd->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Should never happen. Did. (OLPC trac #4184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) checkedlen = strnlen(rd->name, rd->nsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (checkedlen < rd->nsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ofs, checkedlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) fd = jffs2_alloc_full_dirent(checkedlen+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) memcpy(&fd->name, rd->name, checkedlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) fd->name[checkedlen] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) crc = crc32(0, fd->name, checkedlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (crc != je32_to_cpu(rd->name_crc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) __func__, ofs, je32_to_cpu(rd->name_crc), crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) fd->name, je32_to_cpu(rd->ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) jffs2_free_full_dirent(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* FIXME: Why do we believe totlen? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) jffs2_free_full_dirent(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) PAD(je32_to_cpu(rd->totlen)), ic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) fd->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) fd->version = je32_to_cpu(rd->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) fd->ino = je32_to_cpu(rd->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) fd->type = rd->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (jffs2_sum_active()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
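/*
 * Editor's note: the full_dirent built above is only parked on the parent
 * inode's scan_dents list at this point; matching it up with child inodes
 * (and discarding deletion dirents) happens later, in the build pass that
 * runs after every erase block has been scanned (see build.c).
 */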
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static int count_list(struct list_head *l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) uint32_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct list_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) list_for_each(tmp, l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Note: This breaks if list_empty(head). I don't care. You
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) might, if you copy this code and use it elsewhere :) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void rotate_list(struct list_head *head, uint32_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct list_head *n = head->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) list_del(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) while (count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) n = n->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) list_add(head, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
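/*
 * Illustration (editor's note): with head -> A -> B -> C and count == 1,
 * the head is unlinked and re-inserted after B, so the list then reads
 * C, A, B from the head. The caller below guards each call with a
 * non-zero count, per the warning above.
 */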
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) void jffs2_rotate_lists(struct jffs2_sb_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) uint32_t x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) uint32_t rotateby;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) x = count_list(&c->clean_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rotateby = pseudo_random % x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) rotate_list(&c->clean_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) x = count_list(&c->very_dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) rotateby = pseudo_random % x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) rotate_list(&c->very_dirty_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) x = count_list(&c->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rotateby = pseudo_random % x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) rotate_list(&c->dirty_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) x = count_list(&c->erasable_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) rotateby = pseudo_random % x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) rotate_list(&c->erasable_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (c->nr_erasing_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) rotateby = pseudo_random % c->nr_erasing_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) rotate_list(&c->erase_pending_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (c->nr_free_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) rotateby = pseudo_random % c->nr_free_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) rotate_list(&c->free_list, rotateby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
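/*
 * Editor's note: pseudo_random is accumulated from the node version numbers
 * seen during the scan, so it differs from mount to mount; rotating the block
 * lists by it is a cheap way of not always starting allocation and garbage
 * collection from the same erase blocks, which helps spread wear.
 */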