/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/rawnand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/writeback.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

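/* Helpers to split an offset on write-buffer page boundaries: PAGE_DIV()
   rounds x down to the start of its wbuf page, PAGE_MOD() gives the
   offset of x within that page. */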
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

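/* Return 1 if the write-buffer holds data destined for inode @ino, or
   for any inode at all when @ino is zero; 0 otherwise. */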
static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

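/* Free the list of inodes dirtied through the write-buffer. The static
   inodirty_nomem marker is not heap-allocated, so it must not be freed. */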
static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

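/* Record that the write-buffer now holds data for @ino and arm the
   delayed write-out. On allocation failure, fall back to the
   inodirty_nomem marker, which makes _every_ inode count as pending. */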
static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Schedule delayed write-buffer write-out */
	jffs2_dirty_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

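/* Move blocks which became fully obsolete while their tail was still in
   the write-buffer off erasable_pending_wbuf_list, now that the wbuf has
   been flushed. Most go straight to erase_pending_list; roughly one in
   128 is parked on erasable_list instead to spread the erase load. */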
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

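/* Take a block which just suffered a write failure out of service:
   unhook it from c->nextblock (or whichever list it is on), file it on
   bad_used_list if it still holds nodes, or on erase_pending_list if it
   is empty (only legal with REFILE_ANYWAY), and account its remaining
   free space as wasted. */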
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

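/* For a node being rewritten during recovery, find the in-core pointer
   (f->metadata->raw, a full_dnode's ->raw, or a full_dirent's ->raw)
   which references @raw, so the caller can repoint it at the new copy.
   Returns NULL for node types with no such in-core reference. */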
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}

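/* Read back the page we just wrote and compare it against the wbuf
   contents, dumping both on mismatch. Compiled away to (0) when
   CONFIG_JFFS2_FS_WBUF_VERIFY is not set. */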
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_ref *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

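/* Write the current contents of the write-buffer out to flash as one
   full page, padding the tail according to @pad, and fall into
   jffs2_wbuf_recover() if the write fails. Caller must hold alloc_sem
   (checked below) and wbuf_sem. */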
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* Claim the remaining space on the page. This happens when we change
	   to a new block, or when fsync forces us to flush the write-buffer.
	   If we have a switch to the next page, we will not have enough
	   remaining space for this. */
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
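	/*
	 * Fast-path bypass: if the wbuf is empty and the incoming chunk is at
	 * least a full page, buffer nothing and return 0 so that the caller
	 * writes the page-aligned part directly to flash.
	 */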
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (len > (c->wbuf_pagesize - c->wbuf_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) len = c->wbuf_pagesize - c->wbuf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) memcpy(c->wbuf + c->wbuf_len, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) c->wbuf_len += (uint32_t) len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) unsigned long count, loff_t to, size_t *retlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) uint32_t ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct jffs2_eraseblock *jeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) size_t wbuf_retlen, donelen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) uint32_t outvec_to = to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int ret, invec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* If not writebuffered flash, don't bother */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!jffs2_is_writebuffered(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) down_write(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* If wbuf_ofs is not initialized, set it to target address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (c->wbuf_ofs == 0xFFFFFFFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) c->wbuf_ofs = PAGE_DIV(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Sanity checks on target address. It's permitted to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * write at the beginning of a new erase block. Anything else,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * and you die. New block starts at xxx000c (0-b = block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* It's a write to a new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (c->wbuf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) __func__, (unsigned long)to, c->wbuf_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) goto outerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* set pointer to new block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) c->wbuf_ofs = PAGE_DIV(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) c->wbuf_len = PAGE_MOD(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* We're not writing immediately after the writebuffer. Bad. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) pr_crit("%s(): Non-contiguous write to %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) __func__, (unsigned long)to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (c->wbuf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pr_crit("wbuf was previously %08x-%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* adjust alignment offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (c->wbuf_len != PAGE_MOD(to)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) c->wbuf_len = PAGE_MOD(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /* take care of alignment to next page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!c->wbuf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) c->wbuf_len = c->wbuf_pagesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ret = __jffs2_flush_wbuf(c, NOPAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto outerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
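	/*
	 * For each input vector: top up the wbuf first, flush it whenever it
	 * reaches a full page, write any remaining whole pages straight to
	 * flash, and finally stash the unaligned tail back in the wbuf.
	 */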
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) for (invec = 0; invec < count; invec++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int vlen = invecs[invec].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) uint8_t *v = invecs[invec].iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (c->wbuf_len == c->wbuf_pagesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ret = __jffs2_flush_wbuf(c, NOPAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) goto outerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) vlen -= wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) outvec_to += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) donelen += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) v += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (vlen >= c->wbuf_pagesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) &wbuf_retlen, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) goto outfile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) vlen -= wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) outvec_to += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) c->wbuf_ofs = outvec_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) donelen += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) v += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (c->wbuf_len == c->wbuf_pagesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ret = __jffs2_flush_wbuf(c, NOPAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto outerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) outvec_to += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) donelen += wbuf_retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res) {
			/* Don't leak the wbuf lock on the error path */
			up_write(&c->wbuf_sem);
			return res;
		}
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) up_write(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) outfile:
	/*
	 * At this point the wbuf itself is fine (it is empty), but refile
	 * nextblock to avoid writing to the same address again.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) spin_lock(&c->erase_completion_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) jeb = &c->blocks[outvec_to / c->sector_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) jffs2_block_refile(c, jeb, REFILE_ANYWAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) spin_unlock(&c->erase_completion_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) outerr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) *retlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) up_write(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
/*
 * This is the entry point for flash writes.
 * Check whether we are working on NAND flash; if so, build a kvec and
 * write it via writev.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) size_t *retlen, const u_char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct kvec vecs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!jffs2_is_writebuffered(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) vecs[0].iov_base = (unsigned char *) buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) vecs[0].iov_len = len;
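	/* ino 0: this write is not associated with any particular inode */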
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
/*
 * Handle readback from the write buffer, and ECC failure returns.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) loff_t orbf = 0, owbf = 0, lwbf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!jffs2_is_writebuffered(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return mtd_read(c->mtd, ofs, len, retlen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* Read flash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) down_read(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = mtd_read(c->mtd, ofs, len, retlen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
	if ((ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (ret == -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer;
		 * maybe we are lucky and all or part of the data is correct.
		 * We check the node: if the data is corrupted, the node check
		 * will sort it out. We keep this block; it will fail on write
		 * or erase and then we mark it bad. Or should we do that now?
		 * But we should give it a chance. Maybe we had a system crash
		 * or power loss before the ECC write or an erase was completed.
		 * So we return success. :)
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* if no writebuffer available or write buffer empty, return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!c->wbuf_pagesize || !c->wbuf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* if we read in a different block, return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
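	/*
	 * The read overlaps the eraseblock the wbuf is caching, so patch the
	 * pending wbuf contents over the stale flash data. Either the read
	 * starts inside the wbuf window (owbf/lwbf case), or the wbuf window
	 * starts inside the read (orbf/lwbf case).
	 */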
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (ofs >= c->wbuf_ofs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (lwbf > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) lwbf = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
		if (orbf > len)			/* does the read end before the wbuf starts? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) lwbf = len - orbf; /* number of bytes to copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (lwbf > c->wbuf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) lwbf = c->wbuf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (lwbf > 0)
		memcpy(buf + orbf, c->wbuf + owbf, lwbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) up_read(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #define NR_OOB_SCAN_PAGES 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
/* For historical reasons we use only 8 bytes for the OOB cleanmarker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #define OOB_CM_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static const struct jffs2_unknown_node oob_cleanmarker =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .totlen = constant_cpu_to_je32(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
/*
 * Check whether the out-of-band area is empty. This function knows about the
 * cleanmarker; if one is present in OOB, it treats the OOB as empty anyway.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int jffs2_check_oob_empty(struct jffs2_sb_info *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct jffs2_eraseblock *jeb, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct mtd_oob_ops ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ops.mode = MTD_OPS_AUTO_OOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ops.oobbuf = c->oobbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ops.datbuf = NULL;
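	/* MTD_OPS_AUTO_OOB: the MTD layer places oobbuf in the free (non-ECC) OOB bytes */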
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) jeb->offset, ops.ooblen, ops.oobretlen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!ret || mtd_is_bitflip(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
	for (i = 0; i < ops.ooblen; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (mode && i < cmlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* Yeah, we know about the cleanmarker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for %08x\n",
				  ops.oobbuf[i], i, jeb->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * Check for a valid cleanmarker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * Returns: 0 if a valid cleanmarker was found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * 1 if no cleanmarker was found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * negative error code if an error occurred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct jffs2_eraseblock *jeb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct mtd_oob_ops ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ops.mode = MTD_OPS_AUTO_OOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ops.ooblen = cmlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ops.oobbuf = c->oobbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ops.datbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) jeb->offset, ops.ooblen, ops.oobretlen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!ret || mtd_is_bitflip(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
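	/* memcmp() is non-zero on mismatch; normalize to 1 = "no cleanmarker found" */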
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct jffs2_eraseblock *jeb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct mtd_oob_ops ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ops.mode = MTD_OPS_AUTO_OOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ops.ooblen = cmlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ops.oobbuf = (uint8_t *)&oob_cleanmarker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ops.datbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, wrote %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
/*
 * On NAND we try to mark this block bad. If the block has failed to erase
 * more than MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures here. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
	/* If the failure count is still below the limit, give this block another chance */
	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = mtd_block_markbad(c->mtd, bad_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (ret) {
		jffs2_dbg(1, "%s(): Marking block at %08x bad failed: error %d\n",
			  __func__, jeb->offset, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
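	/* Tell the caller that the block has now been marked bad */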
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct delayed_work *dwork;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) dwork = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static void delayed_wbuf_sync(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct jffs2_sb_info *c = work_to_sb(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct super_block *sb = OFNI_BS_2SFFJ(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!sb_rdonly(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) jffs2_dbg(1, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) jffs2_flush_wbuf_gc(c, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) void jffs2_dirty_trigger(struct jffs2_sb_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct super_block *sb = OFNI_BS_2SFFJ(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) unsigned long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (sb_rdonly(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
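	/* dirty_writeback_interval is in centiseconds; scale by 10 to get msecs */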
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) delay = msecs_to_jiffies(dirty_writeback_interval * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) jffs2_dbg(1, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!c->mtd->oobsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* Cleanmarker is out-of-band, so inline size zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) c->cleanmarker_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (c->mtd->oobavail == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) pr_err("inconsistent device description\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) jffs2_dbg(1, "using OOB on NAND\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) c->oobavail = c->mtd->oobavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Initialise write buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) init_rwsem(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) c->wbuf_pagesize = c->mtd->writesize;
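	/* 0xFFFFFFFF marks wbuf_ofs as uninitialised until the first write */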
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) c->wbuf_ofs = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!c->wbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!c->oobbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (!c->wbuf_verify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) kfree(c->oobbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) kfree(c->wbuf_verify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) kfree(c->oobbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
int jffs2_dataflash_setup(struct jffs2_sb_info *c)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) c->cleanmarker_size = 0; /* No cleanmarkers needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* Initialize write buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) init_rwsem(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
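	/* On DataFlash the write buffer covers a whole eraseblock, not one page */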
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) c->wbuf_pagesize = c->mtd->erasesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 KiB + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056 bytes
	 * - We take at least 8 eraseblocks and want a size of at least 8 KiB
	 * - The concatenation should be a power of 2
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) c->sector_size = 8 * c->mtd->erasesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) while (c->sector_size < 8192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) c->sector_size *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
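	/*
	 * Worked example: erasesize 528 gives 8 * 528 = 4224, doubled once to
	 * 8448; erasesize 1056 gives 8 * 1056 = 8448 with no doubling needed.
	 */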
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* It may be necessary to adjust the flash size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) c->flash_size = c->mtd->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if ((c->flash_size % c->sector_size) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		pr_warn("flash size adjusted to %dKiB\n", c->flash_size / 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) c->wbuf_ofs = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (!c->wbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (!c->wbuf_verify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
	pr_info("write-buffering enabled: buffer size %d, erase size %d\n",
		c->wbuf_pagesize, c->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) kfree(c->wbuf_verify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c)
{
	/* The cleanmarker currently occupies whole programming regions:
	 * either one region, or two on STMicro flashes with 8-byte regions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) c->cleanmarker_size = max(16u, c->mtd->writesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Initialize write buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) init_rwsem(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) c->wbuf_pagesize = c->mtd->writesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) c->wbuf_ofs = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (!c->wbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!c->wbuf_verify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) kfree(c->wbuf_verify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
int jffs2_ubivol_setup(struct jffs2_sb_info *c)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) c->cleanmarker_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (c->mtd->writesize == 1)
		/* We do not need a write-buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) init_rwsem(&c->wbuf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) c->wbuf_pagesize = c->mtd->writesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) c->wbuf_ofs = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (!c->wbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
	pr_info("write-buffering enabled: buffer size %d, erase size %d\n",
		c->wbuf_pagesize, c->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
void jffs2_ubivol_cleanup(struct jffs2_sb_info *c)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) kfree(c->wbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }