// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
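	/* The update is done, allow a new fastmap update to be scheduled. */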
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

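	/*
	 * Pick the lowest-erase-count entry among the PEBs that can hold
	 * the fastmap super block, i.e. the first UBI_FM_MAX_START ones.
	 */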
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

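/**
 * return_unused_peb - return a PEB to the free tree.
 * @ubi: UBI device description object
 * @e: wear-leveling entry to put back on the free tree
 */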
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical eraseblock, either an anchor
 * candidate or one with a mean erase counter, and removes it from the
 * wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}
	if (ubi->fm_next_anchor) {
		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
		ubi->free_count++;
	}

	/* All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEBs.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);

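	/*
	 * Fill both pools in lock-step, taking at most one PEB for each
	 * per iteration, until both are full or the free tree cannot
	 * supply any more PEBs.
	 */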
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

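	/*
	 * The user pool can still be empty if ubi_update_fastmap() was
	 * unable to refill it. Run pending work synchronously to produce
	 * a free PEB and retry; give up after 10 attempts.
	 */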
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);

	/* Do we have a next anchor? */
	if (!ubi->fm_next_anchor) {
		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
		if (!ubi->fm_next_anchor)
			/* Tell wear leveling to produce a new anchor PEB */
			ubi->fm_do_produce_anchor = 1;
	}

	/* Do wear leveling to get a new anchor PEB or check the
	 * existing next anchor candidate.
	 */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap the very first
	 * time and are now writing a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

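	/* An lnum of 0 means the PEB held the fastmap super block,
	 * any other lnum means it held fastmap data.
	 */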
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

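/**
 * ubi_fastmap_close - return unused fastmap PEBs to the free tree and free
 * the in-memory fastmap state.
 * @ubi: UBI device description object
 */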
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm_next_anchor) {
		return_unused_peb(ubi, ubi->fm_next_anchor);
		ubi->fm_next_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
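	/*
	 * While no fastmap exists yet, keep PEBs below UBI_FM_MAX_START
	 * available as anchor candidates and hand out a different free
	 * entry instead.
	 */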
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}