Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2012 Linutronix GmbH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2014 sigma star gmbh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Author: Richard Weinberger <richard@nod.at>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include "ubi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * init_seen - allocate memory for used for debugging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) static inline unsigned long *init_seen(struct ubi_device *ubi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 	unsigned long *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) 	if (!ubi_dbg_chk_fastmap(ubi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 		      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
/**
 * free_seen - free the "seen" bitmap allocated by init_seen().
 * @seen: bitmap with one bit per PEB (may be NULL; kfree(NULL) is a no-op,
 *        so callers may pass the NULL returned when self-checking is off)
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
/**
 * set_seen - mark a PEB as seen by fastmap.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: bitmap with one bit per PEB (may be NULL when checking is off)
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	/* No-op unless fastmap self-checking is enabled and a bitmap exists. */
	if (ubi_dbg_chk_fastmap(ubi) && seen)
		set_bit(pnum, seen);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  * self_check_seen - check whether all PEB have been seen by fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  * @seen: integer array of @ubi->peb_count size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	int pnum, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) size_t ubi_calc_fm_size(struct ubi_device *ubi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	size = sizeof(struct ubi_fm_sb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 		sizeof(struct ubi_fm_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		sizeof(struct ubi_fm_scan_pool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		sizeof(struct ubi_fm_scan_pool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		(sizeof(struct ubi_fm_eba) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		(ubi->peb_count * sizeof(__be32))) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	return roundup(size, ubi->leb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97)  * new_fm_vhdr - allocate a new volume header for fastmap usage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98)  * @ubi: UBI device description object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99)  * @vol_id: the VID of the new header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * Returns a new struct ubi_vid_hdr on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  * NULL indicates out of memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	struct ubi_vid_io_buf *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	struct ubi_vid_hdr *vh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	vh = ubi_get_vid_hdr(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	vh->vol_type = UBI_VID_DYNAMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	vh->vol_id = cpu_to_be32(vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	/* UBI implementations without fastmap support have to delete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	 * fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	vh->compat = UBI_COMPAT_DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  * add_aeb - create and add a attach erase block to a given list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  * @ai: UBI attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  * @list: the target list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  * @pnum: PEB number of the new attach erase block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131)  * @ec: erease counter of the new LEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132)  * @scrub: scrub this PEB after attaching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134)  * Returns 0 on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		   int pnum, int ec, int scrub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	struct ubi_ainf_peb *aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	aeb = ubi_alloc_aeb(ai, pnum, ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	if (!aeb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	aeb->lnum = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	aeb->scrub = scrub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	aeb->copy_flag = aeb->sqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	ai->ec_sum += aeb->ec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	ai->ec_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	if (ai->max_ec < aeb->ec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 		ai->max_ec = aeb->ec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	if (ai->min_ec > aeb->ec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		ai->min_ec = aeb->ec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	list_add_tail(&aeb->u.list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * add_vol - create and add a new volume to ubi_attach_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  * @ai: ubi_attach_info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166)  * @vol_id: VID of the new volume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167)  * @used_ebs: number of used EBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  * @data_pad: data padding value of the new volume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * @vol_type: volume type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  * @last_eb_bytes: number of bytes in the last LEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  * Returns the new struct ubi_ainf_volume on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * NULL indicates an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 				       int used_ebs, int data_pad, u8 vol_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 				       int last_eb_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	struct ubi_ainf_volume *av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	av = ubi_add_av(ai, vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	if (IS_ERR(av))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		return av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	av->data_pad = data_pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	av->last_data_size = last_eb_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	av->compat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	av->vol_type = vol_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	if (av->vol_type == UBI_STATIC_VOLUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		av->used_ebs = used_ebs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	dbg_bld("found volume (ID %i)", vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	return av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 *
 * Walks @av's red-black tree (keyed by LEB number) to find the insertion
 * slot for @aeb, unlinks @aeb from whatever list it currently sits on, and
 * inserts it into the tree.
 *
 * NOTE(review): if a node with the same lnum already exists, the walk
 * breaks early and rb_link_node() is called on an occupied slot — callers
 * appear to guarantee lnums are unique within @av; verify before reuse.
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	/* Standard rbtree descent: find the leaf link where @aeb belongs. */
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	/* Move @aeb from its current list into the volume's tree. */
	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * On every return path ownership of @new_aeb is consumed: it is either
 * freed, moved to @ai->erase, or inserted into @av's tree. Callers must
 * not touch it afterwards.
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	/* Descend @av's tree (keyed by lnum) looking for an existing entry. */
	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		/* Same lnum, different PEB: decide which copy is current. */
		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			/*
			 * Schedule the old PEB for erasure, then update the
			 * existing tree node in place with new_aeb's data
			 * instead of re-linking a new node.
			 */
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	/* @parent/@p still point at the leaf slot found by the walk above. */
	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328)  * process_pool_aeb - we found a non-empty PEB in a pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330)  * @ai: attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331)  * @new_vh: the volume header derived from new_aeb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332)  * @new_aeb: the AEB to be examined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  * Returns 0 on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 			    struct ubi_vid_hdr *new_vh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 			    struct ubi_ainf_peb *new_aeb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	int vol_id = be32_to_cpu(new_vh->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	struct ubi_ainf_volume *av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		ubi_free_aeb(ai, new_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	/* Find the volume this SEB belongs to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	av = ubi_find_av(ai, vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	if (!av) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		ubi_err(ubi, "orphaned volume in fastmap pool!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		ubi_free_aeb(ai, new_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		return UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	ubi_assert(vol_id == av->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	return update_vol(ubi, ai, av, new_vh, new_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * unmap_peb - unmap a PEB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  * If fastmap detects a free PEB in the pool it has to check whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  * this PEB has been unmapped after writing the fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367)  * @ai: UBI attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368)  * @pnum: The PEB to be unmapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) static void unmap_peb(struct ubi_attach_info *ai, int pnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	struct ubi_ainf_volume *av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	struct rb_node *node, *node2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	struct ubi_ainf_peb *aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 			if (aeb->pnum == pnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 				rb_erase(&aeb->u.rb, &av->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 				av->leb_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 				ubi_free_aeb(ai, aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389)  * scan_pool - scans a pool for changed (no longer empty PEBs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391)  * @ai: attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392)  * @pebs: an array of all PEB numbers in the to be scanned pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393)  * @pool_size: size of the pool (number of entries in @pebs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394)  * @max_sqnum: pointer to the maximal sequence number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395)  * @free: list of PEBs which are most likely free (and go into @ai->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397)  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398)  * < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		     struct list_head *free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	struct ubi_vid_io_buf *vb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	struct ubi_vid_hdr *vh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	struct ubi_ec_hdr *ech;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	struct ubi_ainf_peb *new_aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	int i, pnum, err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (!ech)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	if (!vb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		kfree(ech);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	vh = ubi_get_vid_hdr(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	 * Now scan all PEBs in the pool to find changes which have been made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	 * after the creation of the fastmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	for (i = 0; i < pool_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		int scrub = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		int image_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		pnum = be32_to_cpu(pebs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		if (ubi_io_is_bad(ubi, pnum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 			ubi_err(ubi, "bad PEB in fastmap pool!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		if (err && err != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 				pnum, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		} else if (err == UBI_IO_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			scrub = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		 * Older UBI implementations have image_seq set to zero, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		 * we shouldn't fail if image_seq == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		image_seq = be32_to_cpu(ech->image_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		if (image_seq && (image_seq != ubi->image_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 				be32_to_cpu(ech->image_seq), ubi->image_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			unsigned long long ec = be64_to_cpu(ech->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			unmap_peb(ai, pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 			dbg_bld("Adding PEB to free: %i", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			if (err == UBI_IO_FF_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 				scrub = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 			ret = add_aeb(ai, free, pnum, ec, scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 			dbg_bld("Found non empty PEB:%i in pool", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 			if (err == UBI_IO_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 				scrub = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 			if (!new_aeb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			new_aeb->lnum = be32_to_cpu(vh->lnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			new_aeb->copy_flag = vh->copy_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			new_aeb->scrub = scrub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 			if (*max_sqnum < new_aeb->sqnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 				*max_sqnum = new_aeb->sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 			/* We are paranoid and fall back to scanning mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	ubi_free_vid_buf(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	kfree(ech);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * count_fastmap_pebs - Counts the PEBs found by fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  * @ai: The UBI attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) static int count_fastmap_pebs(struct ubi_attach_info *ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	struct ubi_ainf_peb *aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	struct ubi_ainf_volume *av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct rb_node *rb1, *rb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	list_for_each_entry(aeb, &ai->erase, u.list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	list_for_each_entry(aeb, &ai->free, u.list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540)  * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542)  * @ai: UBI attach info object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543)  * @fm: the fastmap to be attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545)  * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  * < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) static int ubi_attach_fastmap(struct ubi_device *ubi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			      struct ubi_attach_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			      struct ubi_fastmap_layout *fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	struct list_head used, free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	struct ubi_ainf_volume *av;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	struct ubi_fm_sb *fmsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	struct ubi_fm_hdr *fmhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	struct ubi_fm_ec *fmec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	struct ubi_fm_volhdr *fmvhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct ubi_fm_eba *fm_eba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	int ret, i, j, pool_size, wl_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	size_t fm_pos = 0, fm_size = ubi->fm_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	unsigned long long max_sqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	void *fm_raw = ubi->fm_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	INIT_LIST_HEAD(&used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	INIT_LIST_HEAD(&free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	ai->min_ec = UBI_MAX_ERASECOUNTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	fmsb = (struct ubi_fm_sb *)(fm_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	ai->max_sqnum = fmsb->sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	fm_pos += sizeof(struct ubi_fm_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	fm_pos += sizeof(*fmhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	fm_pos += sizeof(*fmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	fm_pos += sizeof(*fmpl_wl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	pool_size = be16_to_cpu(fmpl->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	wl_pool_size = be16_to_cpu(fmpl_wl->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		ubi_err(ubi, "bad pool size: %i", pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	    fm->max_pool_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	    fm->max_wl_pool_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		ubi_err(ubi, "bad maximal WL pool size: %i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			fm->max_wl_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	/* read EC values from free list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		fm_pos += sizeof(*fmec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			      be32_to_cpu(fmec->ec), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	/* read EC values from used list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		fm_pos += sizeof(*fmec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			      be32_to_cpu(fmec->ec), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	/* read EC values from scrub list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		fm_pos += sizeof(*fmec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			      be32_to_cpu(fmec->ec), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	/* read EC values from erase list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		fm_pos += sizeof(*fmec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			      be32_to_cpu(fmec->ec), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	/* Iterate over all volumes and read their EBA table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		fm_pos += sizeof(*fmvhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			     be32_to_cpu(fmvhdr->used_ebs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			     be32_to_cpu(fmvhdr->data_pad),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			     fmvhdr->vol_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			     be32_to_cpu(fmvhdr->last_eb_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		if (IS_ERR(av)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			if (PTR_ERR(av) == -EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 				ubi_err(ubi, "volume (ID %i) already exists",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 					fmvhdr->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		ai->vols_found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		fm_pos += sizeof(*fm_eba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		if (fm_pos >= fm_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			if (pnum < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			aeb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			list_for_each_entry(tmp_aeb, &used, u.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 				if (tmp_aeb->pnum == pnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 					aeb = tmp_aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			if (!aeb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 				goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			aeb->lnum = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			if (av->highest_lnum <= aeb->lnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 				av->highest_lnum = aeb->lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			assign_aeb_to_av(ai, aeb, av);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				aeb->pnum, aeb->lnum, av->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (max_sqnum > ai->max_sqnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		ai->max_sqnum = max_sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		list_move_tail(&tmp_aeb->u.list, &ai->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		list_move_tail(&tmp_aeb->u.list, &ai->erase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	ubi_assert(list_empty(&free));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	 * If fastmap is leaking PEBs (must not happen), raise a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	 * fat warning and fall back to scanning mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	 * We do this here because in ubi_wl_init() it's too late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	 * and we cannot fall back to scanning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		    ai->bad_peb_count - fm->used_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		goto fail_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) fail_bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		list_del(&tmp_aeb->u.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		ubi_free_aeb(ai, tmp_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		list_del(&tmp_aeb->u.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		ubi_free_aeb(ai, tmp_aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * find_fm_anchor - find the most recent Fastmap superblock (anchor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * @ai: UBI attach info to be filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static int find_fm_anchor(struct ubi_attach_info *ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct ubi_ainf_peb *aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	unsigned long long max_sqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	list_for_each_entry(aeb, &ai->fastmap, u.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			max_sqnum = aeb->sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			ret = aeb->pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 				      struct ubi_ainf_peb *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct ubi_ainf_peb *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	new->vol_id = old->vol_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	new->sqnum = old->sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	new->lnum = old->lnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	new->scrub = old->scrub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	new->copy_flag = old->copy_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * ubi_scan_fastmap - scan the fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  * @ai: UBI attach info to be filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * @scan_ai: UBI attach info from the first 64 PEBs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  *           used to find the most recent Fastmap data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * UBI_BAD_FASTMAP if one was found but is not usable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		     struct ubi_attach_info *scan_ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct ubi_fm_sb *fmsb, *fmsb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct ubi_vid_io_buf *vb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	struct ubi_vid_hdr *vh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct ubi_ec_hdr *ech;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	struct ubi_fastmap_layout *fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct ubi_ainf_peb *aeb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	int i, used_blocks, pnum, fm_anchor, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	size_t fm_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	__be32 crc, tmp_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	unsigned long long sqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	fm_anchor = find_fm_anchor(scan_ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (fm_anchor < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		return UBI_NO_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* Copy all (possible) fastmap blocks into our new attach structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		struct ubi_ainf_peb *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		new = clone_aeb(ai, aeb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		list_add(&new->u.list, &ai->fastmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	down_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	memset(ubi->fm_buf, 0, ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (!fmsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (!fm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		kfree(fmsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (ret && ret != UBI_IO_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	else if (ret == UBI_IO_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		fm->to_be_tortured[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (fmsb->version != UBI_FM_FMT_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			fmsb->version, UBI_FM_FMT_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	used_blocks = be32_to_cpu(fmsb->used_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			used_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	fm_size = ubi->leb_size * used_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (fm_size != ubi->fm_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			fm_size, ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (!ech) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		goto free_fm_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (!vb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	vh = ubi_get_vid_hdr(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	for (i = 0; i < used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		int image_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		pnum = be32_to_cpu(fmsb->block_loc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		if (ubi_io_is_bad(ubi, pnum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		if (i == 0 && pnum != fm_anchor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 				pnum, fm_anchor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		if (ret && ret != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 				i, pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 				ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		} else if (ret == UBI_IO_BITFLIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			fm->to_be_tortured[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		image_seq = be32_to_cpu(ech->image_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (!ubi->image_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			ubi->image_seq = image_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		 * Older UBI implementations have image_seq set to zero, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 * we shouldn't fail if image_seq == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		if (image_seq && (image_seq != ubi->image_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			ubi_err(ubi, "wrong image seq:%d instead of %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				be32_to_cpu(ech->image_seq), ubi->image_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		if (ret && ret != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				i, pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 					be32_to_cpu(vh->vol_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 					UBI_FM_SB_VOLUME_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 					be32_to_cpu(vh->vol_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 					UBI_FM_DATA_VOLUME_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (sqnum < be64_to_cpu(vh->sqnum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			sqnum = be64_to_cpu(vh->sqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				       pnum, 0, ubi->leb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		if (ret && ret != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 				"err: %i)", i, pnum, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	kfree(fmsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	fmsb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	fmsb2->data_crc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (crc != tmp_crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		ubi_err(ubi, "fastmap data CRC is invalid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			tmp_crc, crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	fmsb2->sqnum = sqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	fm->used_blocks = used_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	ret = ubi_attach_fastmap(ubi, ai, fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			ret = UBI_BAD_FASTMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	for (i = 0; i < used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		struct ubi_wl_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			goto free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		fm->e[i] = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	ubi->fm = fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	ubi_msg(ubi, "attached by fastmap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	ubi_msg(ubi, "fastmap WL pool size: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		ubi->fm_wl_pool.max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	ubi->fm_disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	ubi->fast_attach = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	ubi_free_vid_buf(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	kfree(ech);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	up_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (ret == UBI_BAD_FASTMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) free_hdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	ubi_free_vid_buf(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	kfree(ech);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) free_fm_sb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	kfree(fmsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	kfree(fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	struct ubi_device *ubi = vol->ubi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	if (!ubi->fast_attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (!vol->checkmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	kfree(vol->checkmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  * ubi_write_fastmap - writes a fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  * @new_fm: the to be written fastmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  * Returns 0 on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int ubi_write_fastmap(struct ubi_device *ubi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			     struct ubi_fastmap_layout *new_fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	size_t fm_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	void *fm_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	struct ubi_fm_sb *fmsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	struct ubi_fm_hdr *fmh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	struct ubi_fm_ec *fec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct ubi_fm_volhdr *fvh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	struct ubi_fm_eba *feba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct ubi_wl_entry *wl_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	struct ubi_volume *vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	struct ubi_vid_io_buf *avbuf, *dvbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	struct ubi_vid_hdr *avhdr, *dvhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	struct ubi_work *ubi_wrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct rb_node *tmp_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	int scrub_peb_count, erase_peb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	unsigned long *seen_pebs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	fm_raw = ubi->fm_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	memset(ubi->fm_buf, 0, ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (!avbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (!dvbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		goto out_free_avbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	avhdr = ubi_get_vid_hdr(avbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	dvhdr = ubi_get_vid_hdr(dvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	seen_pebs = init_seen(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (IS_ERR(seen_pebs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		ret = PTR_ERR(seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		goto out_free_dvbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	spin_lock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	fmsb = (struct ubi_fm_sb *)fm_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	fm_pos += sizeof(*fmsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	fm_pos += sizeof(*fmh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	fmsb->version = UBI_FM_FMT_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	/* the max sqnum will be filled in while *reading* the fastmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	fmsb->sqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	free_peb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	used_peb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	scrub_peb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	erase_peb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	vol_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	fm_pos += sizeof(*fmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	for (i = 0; i < ubi->fm_pool.size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	fm_pos += sizeof(*fmpl_wl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		fec->pnum = cpu_to_be32(wl_e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		set_seen(ubi, wl_e->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		fec->ec = cpu_to_be32(wl_e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		free_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	if (ubi->fm_next_anchor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		free_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		fec->pnum = cpu_to_be32(wl_e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		set_seen(ubi, wl_e->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		fec->ec = cpu_to_be32(wl_e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		used_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	ubi_for_each_protected_peb(ubi, i, wl_e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		fec->pnum = cpu_to_be32(wl_e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		set_seen(ubi, wl_e->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		fec->ec = cpu_to_be32(wl_e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		used_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		fec->pnum = cpu_to_be32(wl_e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		set_seen(ubi, wl_e->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		fec->ec = cpu_to_be32(wl_e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		scrub_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		if (ubi_is_erase_work(ubi_wrk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			wl_e = ubi_wrk->e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			ubi_assert(wl_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			fec->pnum = cpu_to_be32(wl_e->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			set_seen(ubi, wl_e->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			fec->ec = cpu_to_be32(wl_e->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			erase_peb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			fm_pos += sizeof(*fec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		vol = ubi->volumes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (!vol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		vol_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		fm_pos += sizeof(*fvh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		fvh->vol_id = cpu_to_be32(vol->vol_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		fvh->vol_type = vol->vol_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		fvh->data_pad = cpu_to_be32(vol->data_pad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			vol->vol_type == UBI_STATIC_VOLUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		ubi_assert(fm_pos <= ubi->fm_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		for (j = 0; j < vol->reserved_pebs; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			struct ubi_eba_leb_desc ldesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			ubi_eba_get_ldesc(vol, j, &ldesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		feba->reserved_pebs = cpu_to_be32(j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	fmh->vol_count = cpu_to_be32(vol_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	avhdr->lnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	spin_unlock(&ubi->volumes_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		goto out_free_seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	for (i = 0; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	fmsb->data_crc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 					   ubi->fm_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	for (i = 1; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		dvhdr->lnum = cpu_to_be32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				new_fm->e[i]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			goto out_free_seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	for (i = 0; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 					new_fm->e[i]->pnum, 0, ubi->leb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				new_fm->e[i]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			goto out_free_seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	ubi_assert(new_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	ubi->fm = new_fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	ret = self_check_seen(ubi, seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	dbg_bld("fastmap written!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) out_free_seen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	free_seen(seen_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) out_free_dvbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	ubi_free_vid_buf(dvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) out_free_avbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	ubi_free_vid_buf(avbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  * erase_block - Manually erase a PEB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * @pnum: PEB to be erased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  * Returns the new EC value on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) static int erase_block(struct ubi_device *ubi, int pnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct ubi_ec_hdr *ec_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	long long ec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (!ec_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	else if (ret && ret != UBI_IO_BITFLIPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	ret = ubi_io_sync_erase(ubi, pnum, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	ec = be64_to_cpu(ec_hdr->ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	ec += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if (ec > UBI_MAX_ERASECOUNTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	ec_hdr->ec = cpu_to_be64(ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	ret = ec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	kfree(ec_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * invalidate_fastmap - destroys a fastmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  * This function ensures that upon next UBI attach a full scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * is issued. We need this if UBI is about to write a new fastmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  * but is unable to do so. In this case we have two options:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)  * a) Make sure that the current fastmap will not be usued upon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  * attach time and contine or b) fall back to RO mode to have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)  * current fastmap in a valid state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)  * Returns 0 on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static int invalidate_fastmap(struct ubi_device *ubi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	struct ubi_fastmap_layout *fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	struct ubi_wl_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	struct ubi_vid_io_buf *vb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	struct ubi_vid_hdr *vh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (!ubi->fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	ubi->fm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (!fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (!vb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		goto out_free_fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	vh = ubi_get_vid_hdr(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	e = ubi_wl_get_fm_peb(ubi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		goto out_free_fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	 * Create fake fastmap such that UBI will fall back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	 * to scanning mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		goto out_free_fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	fm->used_blocks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	fm->e[0] = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	ubi->fm = fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	ubi_free_vid_buf(vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) out_free_fm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	kfree(fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  * return_fm_pebs - returns all PEBs used by a fastmap back to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * WL sub-system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  * @fm: fastmap layout object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static void return_fm_pebs(struct ubi_device *ubi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			   struct ubi_fastmap_layout *fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	if (!fm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	for (i = 0; i < fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		if (fm->e[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 					  fm->to_be_tortured[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)  * ubi_update_fastmap - will be called by UBI if a volume changes or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  * a fastmap pool becomes full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  * @ubi: UBI device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * Returns 0 on success, < 0 indicates an internal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) int ubi_update_fastmap(struct ubi_device *ubi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	int ret, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	struct ubi_fastmap_layout *new_fm, *old_fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct ubi_wl_entry *tmp_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	down_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	down_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	down_write(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	ubi_refill_pools(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (ubi->ro_mode || ubi->fm_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		up_write(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		up_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		up_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (!new_fm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		up_write(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		up_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		up_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	old_fm = ubi->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	ubi->fm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		ubi_err(ubi, "fastmap too large");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	for (i = 1; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		if (!tmp_e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			if (old_fm && old_fm->e[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 				ret = erase_block(ubi, old_fm->e[i]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 				if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 					ubi_err(ubi, "could not erase old fastmap PEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 					for (j = 1; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 								  j, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 						new_fm->e[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 					goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 				new_fm->e[i] = old_fm->e[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				old_fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 				ubi_err(ubi, "could not get any free erase block");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 				for (j = 1; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 					new_fm->e[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 				ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			new_fm->e[i] = tmp_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			if (old_fm && old_fm->e[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 						  old_fm->to_be_tortured[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 				old_fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	/* Old fastmap is larger than the new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 					  old_fm->to_be_tortured[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			old_fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	spin_lock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	tmp_e = ubi->fm_anchor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	ubi->fm_anchor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	spin_unlock(&ubi->wl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	if (old_fm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		/* no fresh anchor PEB was found, reuse the old one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		if (!tmp_e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			ret = erase_block(ubi, old_fm->e[0]->pnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 				ubi_err(ubi, "could not erase old anchor PEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 				for (i = 1; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 							  i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 					new_fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			new_fm->e[0] = old_fm->e[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			new_fm->e[0]->ec = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			old_fm->e[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			/* we've got a new anchor PEB, return the old one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 					  old_fm->to_be_tortured[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			new_fm->e[0] = tmp_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			old_fm->e[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if (!tmp_e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			ubi_err(ubi, "could not find any anchor PEB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			for (i = 1; i < new_fm->used_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 				new_fm->e[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		new_fm->e[0] = tmp_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	ret = ubi_write_fastmap(ubi, new_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	up_write(&ubi->fm_eba_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	up_write(&ubi->work_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	up_write(&ubi->fm_protect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	kfree(old_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	ubi_ensure_anchor_pebs(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	ret = invalidate_fastmap(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		ubi_err(ubi, "Unable to invalidate current fastmap!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		ubi_ro_mode(ubi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		return_fm_pebs(ubi, old_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		return_fm_pebs(ubi, new_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	kfree(new_fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }