// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only an erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * Wear-leveling is ensured by moving the contents of used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As noted above, for the WL sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in the @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick
 * the target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This
 * leaves room for future re-works of the WL sub-system.
 */
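
/*
 * Illustrative sketch (not part of the driver logic): the protection "time"
 * described above is counted in global erase operations, not in wall-clock
 * time. Assuming a queue of %UBI_PROT_QUEUE_LEN slots, the life of a freshly
 * returned PEB looks roughly like this:
 *
 *	ubi_wl_get_peb()  ->  PEB queued at the tail of @wl->pq
 *	N global erases   ->  the queue head advances N slots
 *	head reaches PEB  ->  PEB promoted to the @wl->used RB-tree
 */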

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation where the picked physical eraseblock is constantly erased after
 * the data is written to it. So, we have a constant which limits the highest
 * erase counter of the free physical eraseblock to pick. Namely, the WL
 * sub-system does not pick eraseblocks with an erase counter greater than the
 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures after which UBI
 * switches to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
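
/*
 * Illustrative sketch (simplified, error handling and freezing omitted): the
 * per-UBI device background thread drains the @ubi->works list one item at a
 * time through do_work(), conceptually like:
 *
 *	while (!kthread_should_stop()) {
 *		if (list_empty(&ubi->works))
 *			schedule();		<- sleep until woken
 *		else
 *			do_work(ubi);
 *	}
 */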

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 1;

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
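
/*
 * Illustrative example: assuming %UBI_PROT_QUEUE_LEN is 10 and @ubi->pq_head
 * is currently 0, the tail computed above wraps around to slot 9, so an
 * entry queued now is aged out by serve_prot_queue() only after all nine
 * older slots have been served.
 */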

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
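
/*
 * Illustrative example with hypothetical erase counters: if @root holds
 * entries with ECs {10, 12, 40, 95} and @diff is 50, the bound is
 * 10 + 50 = 60 and the walk above returns the EC-40 entry - the highest
 * erase counter still below the bound.
 */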

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}
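
/*
 * Illustrative example (assuming the default UBI_WL_THRESHOLD of 4096, i.e.
 * WL_FREE_MAX_DIFF == 8192): if @root spans ECs 100..160, the spread is well
 * below WL_FREE_MAX_DIFF, so the tree's root entry - roughly the median EC -
 * is chosen; with a spread of 8192 or more, find_wl_entry() caps the pick at
 * min EC + 4096 instead.
 */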

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}
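
/*
 * Illustrative usage sketch (hypothetical caller; @ubi->wl_lock held, as for
 * the other protection queue operations):
 *
 *	err = prot_queue_del(ubi, e->pnum);
 *	if (err == -ENODEV)
 *		...the PEB was not protected to begin with...
 */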

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
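
/*
 * Illustrative example: for a PEB whose in-memory EC is 41, a successful
 * ubi_io_sync_erase() call returns the number of erasures actually
 * performed - normally 1, more if torture testing re-erased the block - so
 * the EC written back into the header above becomes 42 (or higher).
 */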

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * head of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
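
/*
 * Illustrative example: because @ubi->pq_head advances by exactly one slot
 * per call, and serve_prot_queue() runs once per erase operation, an entry
 * placed at the tail by prot_queue_add() is promoted to @ubi->used after
 * %UBI_PROT_QUEUE_LEN global erase operations - which is precisely the
 * protection "time" described at the top of this file.
 */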

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: denotes whether @ubi->work_sem is already held in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
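
/*
 * Illustrative note on @nested: a caller that already holds @ubi->work_sem
 * in read mode (e.g. a worker function re-scheduling an erase) must pass
 * nested=true so the semaphore is not acquired again, which could deadlock
 * against a pending writer:
 *
 *	schedule_erase(ubi, e, vol_id, lnum, torture, true);
 */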

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	e1 = find_anchor_wl_entry(&ubi->used);
	if (e1 && ubi->fm_next_anchor &&
	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
		ubi->fm_do_produce_anchor = 1;
		/* fm_next_anchor is no longer considered a good anchor
		 * candidate.
		 * NULL assignment also prevents multiple wear level checks
		 * of this PEB.
		 */
		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
		ubi->fm_next_anchor = NULL;
		ubi->free_count++;
	}

	if (ubi->fm_do_produce_anchor) {
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
		ubi->fm_do_produce_anchor = 0;
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ by a large enough margin, start
		 * wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) ubi->move_from = e1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ubi->move_to = e2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * We so far do not know which logical eraseblock our physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * eraseblock (@e1) belongs to. We have to read the volume identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * header first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * Note, we are protected from this PEB being unmapped and erased. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * which is being moved was unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (err && err != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) dst_leb_clean = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (err == UBI_IO_FF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * We are trying to move PEB without a VID header. UBI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * always write VID headers shortly after the PEB was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * given, so we have a situation when it has not yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * had a chance to write it, because it was preempted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * So add this PEB to the protection queue so far,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * because presumably more data will be written there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * (including the missing VID header), and then we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * move it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dbg_wl("PEB %d has no VID header", e1->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) protect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) } else if (err == UBI_IO_FF_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * The same situation as %UBI_IO_FF, but bit-flips were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * detected. It is better to schedule this PEB for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * scrubbing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dbg_wl("PEB %d has no VID header but has bit-flips",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) e1->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) scrubbing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * While a full scan would detect interrupted erasures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * at attach time we can face them here when attached from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) e1->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) erase = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ubi_err(ubi, "error %d while reading VID header from PEB %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) err, e1->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) vol_id = be32_to_cpu(vid_hdr->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) lnum = be32_to_cpu(vid_hdr->lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (err == MOVE_CANCEL_RACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * The LEB has not been moved because the volume is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * being deleted or the PEB has been put meanwhile. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * should prevent this PEB from being selected for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * wear-leveling movement again, so put it to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * protection queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) protect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) dst_leb_clean = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (err == MOVE_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) scrubbing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) dst_leb_clean = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) err == MOVE_TARGET_RD_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * Target PEB had bit-flips or write error - torture it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) torture = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) keep = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (err == MOVE_SOURCE_RD_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * An error happened while reading the source PEB. Do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * not switch to R/O mode in this case, and give the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * upper layers a possibility to recover from this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * e.g. by unmapping corresponding LEB. Instead, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * put this PEB to the @ubi->erroneous list to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * UBI from trying to move it over and over again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (ubi->erroneous_peb_count > ubi->max_erroneous) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ubi_err(ubi, "too many erroneous eraseblocks (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ubi->erroneous_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) dst_leb_clean = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) erroneous = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto out_not_moved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ubi_assert(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* The PEB has been successfully moved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (scrubbing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) e1->pnum, vol_id, lnum, e2->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ubi_free_vid_buf(vidb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!ubi->move_to_put) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) wl_tree_add(e2, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) e2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ubi->move_from = ubi->move_to = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ubi->move_to_put = ubi->wl_scheduled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (e2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) wl_entry_destroy(ubi, e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (e2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Well, the target PEB was put meanwhile, schedule it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * erasure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) e2->pnum, vol_id, lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) dbg_wl("done");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mutex_unlock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) up_read(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * For some reasons the LEB was not moved, might be an error, might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * something else. @e1 was not changed, so return it back. @e2 might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * have been changed, schedule it for erasure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) out_not_moved:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (vol_id != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) e1->pnum, vol_id, lnum, e2->pnum, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dbg_wl("cancel moving PEB %d to PEB %d (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) e1->pnum, e2->pnum, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (protect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) prot_queue_add(ubi, e1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) else if (erroneous) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) wl_tree_add(e1, &ubi->erroneous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ubi->erroneous_peb_count += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) } else if (scrubbing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) wl_tree_add(e1, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) else if (keep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) wl_tree_add(e1, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (dst_leb_clean) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) wl_tree_add(e2, &ubi->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ubi->free_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ubi_assert(!ubi->move_to_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ubi->move_from = ubi->move_to = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ubi->wl_scheduled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ubi_free_vid_buf(vidb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (dst_leb_clean) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ensure_wear_leveling(ubi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (erase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) mutex_unlock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) up_read(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (vol_id != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) err, e1->pnum, e2->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) err, e1->pnum, vol_id, lnum, e2->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ubi->move_from = ubi->move_to = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ubi->move_to_put = ubi->wl_scheduled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ubi_free_vid_buf(vidb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) wl_entry_destroy(ubi, e1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) wl_entry_destroy(ubi, e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) out_ro:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ubi_ro_mode(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mutex_unlock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) up_read(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ubi_assert(err != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return err < 0 ? err : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) out_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ubi->wl_scheduled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) mutex_unlock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) up_read(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ubi_free_vid_buf(vidb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * ensure_wear_leveling - schedule wear-leveling if it is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * @nested: set to non-zero if this function is called from UBI worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * This function checks if it is time to start wear-leveling and schedules it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * if yes. This function returns zero in case of success and a negative error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * code in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct ubi_wl_entry *e1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct ubi_wl_entry *e2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct ubi_work *wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (ubi->wl_scheduled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Wear-leveling is already in the work queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * If the ubi->scrub tree is not empty, scrubbing is needed, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * the WL worker has to be scheduled anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (!ubi->scrub.rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (!ubi->used.rb_node || !ubi->free.rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* No physical eraseblocks - no deal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * We schedule wear-leveling only if the difference between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * lowest erase counter of used physical eraseblocks and a high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * erase counter of free physical eraseblocks is greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * %UBI_WL_THRESHOLD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dbg_wl("schedule wear-leveling");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dbg_wl("schedule scrubbing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ubi->wl_scheduled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!wrk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) goto out_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) wrk->func = &wear_leveling_worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (nested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) __schedule_ubi_work(ubi, wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) schedule_ubi_work(ubi, wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) out_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ubi->wl_scheduled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * __erase_worker - physical eraseblock erase worker function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * @wl_wrk: the work object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * @shutdown: non-zero if the worker has to free memory and exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * because the WL sub-system is shutting down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * This function erases a physical eraseblock and perform torture testing if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * needed. It also takes care about marking the physical eraseblock bad if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * needed. Returns zero in case of success and a negative error code in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct ubi_wl_entry *e = wl_wrk->e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int pnum = e->pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int vol_id = wl_wrk->vol_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int lnum = wl_wrk->lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int err, available_consumed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dbg_wl("erase PEB %d EC %d LEB %d:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) err = sync_erase(ubi, e, wl_wrk->torture);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) e->pnum < UBI_FM_MAX_START) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Abort anchor production, if needed it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * enabled again in the wear leveling started below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ubi->fm_next_anchor = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ubi->fm_do_produce_anchor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) wl_tree_add(e, &ubi->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ubi->free_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * One more erase operation has happened, take care about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * protected physical eraseblocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) serve_prot_queue(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* And take care about wear-leveling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) err = ensure_wear_leveling(ubi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Re-schedule the LEB for erasure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (err1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) wl_entry_destroy(ubi, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) err = err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) wl_entry_destroy(ubi, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (err != -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * If this is not %-EIO, we have no idea what to do. Scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * this physical eraseblock for erasure again would cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * errors again and again. Well, lets switch to R/O mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* It is %-EIO, the PEB went bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!ubi->bad_allowed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) spin_lock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (ubi->beb_rsvd_pebs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (ubi->avail_pebs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) spin_unlock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ubi_err(ubi, "no reserved/available physical eraseblocks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ubi->avail_pebs -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) available_consumed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) spin_unlock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ubi_msg(ubi, "mark PEB %d as bad", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) err = ubi_io_mark_bad(ubi, pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto out_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) spin_lock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (ubi->beb_rsvd_pebs > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (available_consumed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * The amount of reserved PEBs increased since we last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ubi->avail_pebs += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) available_consumed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ubi->beb_rsvd_pebs -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ubi->bad_peb_count += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ubi->good_peb_count -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ubi_calculate_reserved(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (available_consumed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) else if (ubi->beb_rsvd_pebs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ubi_msg(ubi, "%d PEBs left in the reserve",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ubi->beb_rsvd_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ubi_warn(ubi, "last PEB from the reserve was used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) spin_unlock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) out_ro:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (available_consumed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) spin_lock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ubi->avail_pebs += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) spin_unlock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ubi_ro_mode(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) int shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct ubi_wl_entry *e = wl_wrk->e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) kfree(wl_wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) wl_entry_destroy(ubi, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ret = __erase_worker(ubi, wl_wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) kfree(wl_wrk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * @vol_id: the volume ID that last used this PEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * @lnum: the last used logical eraseblock number for the PEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * @pnum: physical eraseblock to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * @torture: if this physical eraseblock has to be tortured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * This function is called to return physical eraseblock @pnum to the pool of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * free physical eraseblocks. The @torture flag has to be set if an I/O error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * occurred to this @pnum and it has to be tested. This function returns zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * in case of success, and a negative error code in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) int pnum, int torture)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct ubi_wl_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dbg_wl("PEB %d", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ubi_assert(pnum >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ubi_assert(pnum < ubi->peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) down_read(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) e = ubi->lookuptbl[pnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (e == ubi->move_from) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * User is putting the physical eraseblock which was selected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * be moved. It will be scheduled for erasure in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * wear-leveling worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dbg_wl("PEB %d is being moved, wait", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* Wait for the WL worker by taking the @ubi->move_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mutex_lock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) mutex_unlock(&ubi->move_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) } else if (e == ubi->move_to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * User is putting the physical eraseblock which was selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * as the target the data is moved to. It may happen if the EBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * but the WL sub-system has not put the PEB to the "used" tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * yet, but it is about to do this. So we just set a flag which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * will tell the WL worker that the PEB is not needed anymore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * and should be scheduled for erasure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dbg_wl("PEB %d is the target of data moving", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ubi_assert(!ubi->move_to_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ubi->move_to_put = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) up_read(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (in_wl_tree(e, &ubi->used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) self_check_in_wl_tree(ubi, e, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) rb_erase(&e->u.rb, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) } else if (in_wl_tree(e, &ubi->scrub)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) self_check_in_wl_tree(ubi, e, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) rb_erase(&e->u.rb, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) } else if (in_wl_tree(e, &ubi->erroneous)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) self_check_in_wl_tree(ubi, e, &ubi->erroneous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rb_erase(&e->u.rb, &ubi->erroneous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ubi->erroneous_peb_count -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ubi_assert(ubi->erroneous_peb_count >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /* Erroneous PEBs should be tortured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) torture = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = prot_queue_del(ubi, e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) ubi_err(ubi, "PEB %d not found", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ubi_ro_mode(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) up_read(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) wl_tree_add(e, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) up_read(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * @pnum: the physical eraseblock to schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * needs scrubbing. This function schedules a physical eraseblock for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * scrubbing which is done in background. This function returns zero in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * success and a negative error code in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct ubi_wl_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) e = ubi->lookuptbl[pnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) in_wl_tree(e, &ubi->erroneous)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (e == ubi->move_to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * This physical eraseblock was used to move data to. The data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * was moved but the PEB was not yet inserted to the proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * tree. We should just wait a little and let the WL worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) dbg_wl("the PEB %d is not in proper tree, retry", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) yield();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (in_wl_tree(e, &ubi->used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) self_check_in_wl_tree(ubi, e, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) rb_erase(&e->u.rb, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) err = prot_queue_del(ubi, e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ubi_err(ubi, "PEB %d not found", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ubi_ro_mode(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) wl_tree_add(e, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Technically scrubbing is the same as wear-leveling, so it is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * by the WL worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return ensure_wear_leveling(ubi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * ubi_wl_flush - flush all pending works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * @vol_id: the volume id to flush for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * @lnum: the logical eraseblock number to flush for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * This function executes all pending works for a particular volume id /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * acts as a wildcard for all of the corresponding volume numbers or logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * eraseblock numbers. It returns zero in case of success and a negative error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * code in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) int found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * Erase while the pending works queue is not empty, but not more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * the number of currently pending works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) vol_id, lnum, ubi->works_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) while (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct ubi_work *wrk, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) down_read(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) (lnum == UBI_ALL || wrk->lnum == lnum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) list_del(&wrk->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ubi->works_count -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ubi_assert(ubi->works_count >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err = wrk->func(ubi, wrk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) up_read(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) up_read(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * Make sure all the works which have been done in parallel are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) down_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) up_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (in_wl_tree(e, &ubi->scrub))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) else if (in_wl_tree(e, &ubi->erroneous))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) else if (ubi->move_from == e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) else if (ubi->move_to == e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * @pnum: the physical eraseblock to schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * @force: dont't read the block, assume bitflips happened and take action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * This function reads the given eraseblock and checks if bitflips occured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * In case of bitflips, the eraseblock is scheduled for scrubbing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * If scrubbing is forced with @force, the eraseblock is not read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * but scheduled for scrubbing right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * %EINVAL, PEB is out of range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * %ENOENT, PEB is no longer used by UBI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * %EBUSY, PEB cannot be checked now or a check is currently running on it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * %EAGAIN, bit flips happened but scrubbing is currently not possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * %0, no bit flips detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct ubi_wl_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (pnum < 0 || pnum >= ubi->peb_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * Pause all parallel work, otherwise it can happen that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * erase worker frees a wl entry under us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) down_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * Make sure that the wl entry does not change state while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * inspecting it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) e = ubi->lookuptbl[pnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto out_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * Does it make sense to check this PEB?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (!scrub_possible(ubi, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto out_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) mutex_lock(&ubi->buf_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) mutex_unlock(&ubi->buf_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (force || err == UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * Okay, bit flip happened, let's figure out what we can do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * Recheck. We released wl_lock, UBI might have killed the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * wl entry under us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) e = ubi->lookuptbl[pnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) goto out_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * Need to re-check state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!scrub_possible(ubi, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) goto out_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (in_pq(ubi, e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) prot_queue_del(ubi, e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) wl_tree_add(e, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) err = ensure_wear_leveling(ubi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) } else if (in_wl_tree(e, &ubi->used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) rb_erase(&e->u.rb, &ubi->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) wl_tree_add(e, &ubi->scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) err = ensure_wear_leveling(ubi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) } else if (in_wl_tree(e, &ubi->free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) rb_erase(&e->u.rb, &ubi->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ubi->free_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * This PEB is empty we can schedule it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * erasure right away. No wear leveling needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) force ? 0 : 1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (!err && !force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) err = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) out_resume:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) up_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}
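
/*
 * Illustrative sketch (not part of UBI): tree_destroy() above open-codes a
 * post-order walk that frees every node without rebalancing. The generic
 * rbtree helper rbtree_postorder_for_each_entry_safe() expresses the same
 * idea; a hypothetical equivalent, assuming the same @u.rb embedding, could
 * look like this:
 */
static void __maybe_unused example_tree_destroy(struct ubi_device *ubi,
						struct rb_root *root)
{
	struct ubi_wl_entry *e, *tmp;

	/* Post-order: children are visited (and freed) before their parent. */
	rbtree_postorder_for_each_entry_safe(e, tmp, root, u.rb)
		wl_entry_destroy(ubi, e);

	*root = RB_ROOT;	/* leave the tree in a well-defined empty state */
}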

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);

			/*
			 * Check kthread_should_stop() after we set the task
			 * state to guarantee that we either see the stop bit
			 * and exit or the task state is reset to runnable such
			 * that it's not scheduled out indefinitely and detects
			 * the stop bit at kthread_should_stop().
			 */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	ubi->thread_enabled = 0;
	return 0;
}
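
/*
 * Illustrative sketch (not part of UBI): ubi_thread() follows the standard
 * kthread pattern (kthread_should_stop() / try_to_freeze()), so its
 * lifecycle is driven from the outside with the usual kthread API. UBI does
 * this during attach/detach elsewhere; a hypothetical minimal driver,
 * assuming a populated @ubi, might do:
 */
static int __maybe_unused example_run_bgt(struct ubi_device *ubi)
{
	struct task_struct *task;

	/* Spawn and immediately wake the background thread. */
	task = kthread_run(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* ... later, at detach time: wakes the thread and waits for exit. */
	kthread_stop(task);
	return 0;
}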

/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
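
/*
 * Illustrative sketch (not part of UBI): every queued work callback receives
 * a third "shutdown" argument; shutdown_work() above passes 1, in which case
 * the callback must only release its resources and must not touch the flash.
 * A hypothetical no-op work function, assuming the callback signature used
 * above, could look like:
 */
static int __maybe_unused example_work_func(struct ubi_device *ubi,
					    struct ubi_work *wrk, int shutdown)
{
	if (shutdown) {
		/* Cancellation path: free the work item, do no I/O. */
		kfree(wrk);
		return 0;
	}

	dbg_wl("example work runs for UBI device %d", ubi->ubi_num);
	kfree(wrk);
	return 0;
}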

/**
 * erase_aeb - erase the PEB described by an attach information object.
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: If true, erase synchronously. Otherwise schedule it for erasure.
 */
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
	struct ubi_wl_entry *e;
	int err;

	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->pnum = aeb->pnum;
	e->ec = aeb->ec;
	ubi->lookuptbl[e->pnum] = e;

	if (sync) {
		err = sync_erase(ubi, e, false);
		if (err)
			goto out_free;

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	} else {
		err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
		if (err)
			goto out_free;
	}

	return 0;

out_free:
	wl_entry_destroy(ubi, e);

	return err;
}
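
/*
 * Illustrative sketch (not part of UBI): the attach code below chooses
 * between the two erase_aeb() modes. Condensed into a hypothetical helper,
 * the decision is simply "old fastmap superblock PEBs are erased
 * synchronously, everything else is queued for the background thread":
 */
static int __maybe_unused example_erase_on_attach(struct ubi_device *ubi,
						  struct ubi_ainf_peb *aeb)
{
	bool sync = aeb->vol_id == UBI_FM_SB_VOLUME_ID;

	return erase_aeb(ubi, aeb, sync);
}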

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		err = erase_aeb(ubi, aeb, false);
		if (err)
			goto out_free;

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			err = -ENOMEM;
			goto out_free;
		}

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e) {
				err = -ENOMEM;
				goto out_free;
			}

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		cond_resched();

		e = ubi_find_fm_block(ubi, aeb->pnum);

		if (e) {
			ubi_assert(!ubi->lookuptbl[e->pnum]);
			ubi->lookuptbl[e->pnum] = e;
		} else {
			bool sync = false;

			/*
			 * Usually old fastmap PEBs are scheduled for erasure
			 * and we don't have to care about them, but if a
			 * power cut happens before they are scheduled, we
			 * need to take care of them here.
			 */
			if (ubi->lookuptbl[aeb->pnum])
				continue;

			/*
			 * The fastmap update code might not find a free PEB for
			 * writing the fastmap anchor to and then reuses the
			 * current fastmap anchor PEB. When this PEB gets erased
			 * and a power cut happens before it is written again we
			 * must make sure that the fastmap attach code doesn't
			 * find any outdated fastmap anchors, hence we erase the
			 * outdated fastmap anchor PEBs synchronously here.
			 */
			if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
				sync = true;

			err = erase_aeb(ubi, aeb, sync);
			if (err)
				goto out_free;
		}

		found_pebs++;
	}

	dbg_wl("found %i PEBs", found_pebs);

	ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	if (!ubi->ro_mode && !ubi->fm_disabled)
		ubi_ensure_anchor_pebs(ubi);
#endif
	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, %1 if it does not, and a negative error code in case of
 * an internal failure.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_pq(ubi, e))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
/**
 * get_peb_for_wl - pick a suitable free PEB for wear-leveling and claim it.
 * @ubi: UBI device description object
 *
 * Removes the chosen entry from the @ubi->free tree and returns it. Must be
 * called with @ubi->wl_lock held.
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 *
 * Must be called with @ubi->wl_lock held; the lock is dropped and re-acquired
 * around each synchronously executed work item.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}
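
/*
 * Illustrative sketch (not part of UBI): a hypothetical caller of
 * produce_free_peb(), showing the locking convention documented above -
 * wl_lock is taken by the caller and may be transparently dropped and
 * re-taken inside the helper while a work item runs:
 */
static int __maybe_unused example_make_free_peb(struct ubi_device *ubi)
{
	int err = 0;

	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node)
		err = produce_free_peb(ubi);	/* may sleep: drops wl_lock */
	spin_unlock(&ubi->wl_lock);

	return err;
}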

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode, on success and failure
 * alike!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}
	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
#else
#include "fastmap-wl.c"
#endif
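
/*
 * Illustrative sketch (not part of UBI): how a caller pairs
 * ubi_wl_get_peb() with ubi_wl_put_peb(). The helper name is hypothetical;
 * the key point is that @ubi->fm_eba_sem is read-held on return from
 * ubi_wl_get_peb() and must be released by the caller in all cases:
 */
static int __maybe_unused example_get_put_peb(struct ubi_device *ubi)
{
	int pnum;

	pnum = ubi_wl_get_peb(ubi);	/* returns with fm_eba_sem read-held */
	if (pnum < 0) {
		up_read(&ubi->fm_eba_sem);
		return pnum;
	}

	/* ... a real caller would write a VID header and data here ... */

	up_read(&ubi->fm_eba_sem);

	/* Hand the PEB back; it will be scheduled for erasure. */
	return ubi_wl_put_peb(ubi, UBI_UNKNOWN, UBI_UNKNOWN, pnum, 0);
}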