/* SPDX-License-Identifier: GPL-2.0 */

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#ifndef _SFC_NAND_MTD_H
#define _SFC_NAND_MTD_H

#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define CONFIG_MTD_NAND_BBT_USING_FLASH

#ifndef nand_bbt_block_status
/* BBT block status values */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};
#endif

/* nand_bbt options */
#define NANDDEV_BBT_USE_FLASH	BIT(0)
#define NANDDEV_BBT_SCANNED	BIT(1)

/* The maximum number of blocks to scan for a bbt */
#define NANDDEV_BBT_SCAN_MAXBLOCKS	4

/* In-RAM bad block table state */
struct snand_bbt {
	unsigned long *cache;	/* per-block status bitmap */
	unsigned int option;	/* NANDDEV_BBT_* flags */
	unsigned int version;	/* version of the table stored in flash */
};

struct snand_mtd_dev {
	struct SFNAND_DEV *snand;
	struct mutex *lock; /* to lock this object */
	struct mtd_info mtd;
	u8 *dma_buf;
	struct snand_bbt bbt;
};

static inline unsigned int snanddev_neraseblocks(const struct snand_mtd_dev *nand)
{
	return nand->mtd.size >> nand->mtd.erasesize_shift;
}
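
/*
 * Example (illustrative sketch only): sizing the in-RAM BBT cache from the
 * erase-block count. The real allocation is done by snanddev_bbt_init(); the
 * number of status bits kept per block is an assumption here, borrowed from
 * the generic NAND core rather than documented by this driver.
 *
 *	unsigned int nblocks = snanddev_neraseblocks(nand);
 *	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
 *	size_t nwords = DIV_ROUND_UP(nblocks * bits_per_block, BITS_PER_LONG);
 *
 *	nand->bbt.cache = kcalloc(nwords, sizeof(unsigned long), GFP_KERNEL);
 */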

static inline bool snanddev_bbt_is_initialized(struct snand_mtd_dev *nand)
{
	return !!nand->bbt.cache;
}

static inline unsigned int snanddev_bbt_pos_to_entry(struct snand_mtd_dev *nand,
						     const loff_t pos)
{
	return (unsigned int)(pos >> nand->mtd.erasesize_shift);
}
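
/*
 * Example (illustrative sketch only): translating a byte offset into a BBT
 * entry and checking its status. The helper below is hypothetical; it merely
 * shows how snanddev_bbt_pos_to_entry() and snanddev_bbt_get_block_status()
 * are meant to be combined.
 *
 *	static bool snand_block_looks_bad(struct snand_mtd_dev *nand, loff_t ofs)
 *	{
 *		unsigned int entry = snanddev_bbt_pos_to_entry(nand, ofs);
 *		int status = snanddev_bbt_get_block_status(nand, entry);
 *
 *		return status == NAND_BBT_BLOCK_WORN ||
 *		       status == NAND_BBT_BLOCK_FACTORY_BAD;
 *	}
 */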

static inline struct mtd_info *snanddev_to_mtd(struct snand_mtd_dev *nand)
{
	return &nand->mtd;
}

static inline struct snand_mtd_dev *mtd_to_snanddev(struct mtd_info *mtd)
{
	return mtd->priv;
}
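
/*
 * The two conversions above assume the registration code stores the wrapper
 * in mtd->priv, e.g. (sketch, not taken from this driver's source):
 *
 *	struct mtd_info *mtd = snanddev_to_mtd(nand);
 *
 *	mtd->priv = nand;
 *	// ... fill in mtd->size, mtd->erasesize, ops, then register the device
 */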

int snanddev_bbt_init(struct snand_mtd_dev *nand);
void snanddev_bbt_cleanup(struct snand_mtd_dev *nand);
int snanddev_bbt_update(struct snand_mtd_dev *nand);
int snanddev_bbt_get_block_status(const struct snand_mtd_dev *nand,
				  unsigned int entry);
int snanddev_bbt_set_block_status(struct snand_mtd_dev *nand, unsigned int entry,
				  enum nand_bbt_block_status status);
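
/*
 * Typical BBT lifecycle (illustrative sketch only; error handling and locking
 * elided, and the call ordering is an assumption rather than a documented
 * contract of this driver):
 *
 *	ret = snanddev_bbt_init(nand);		// allocate/load the table
 *	...
 *	entry = snanddev_bbt_pos_to_entry(nand, ofs);
 *	snanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
 *	snanddev_bbt_update(nand);		// persist if NANDDEV_BBT_USE_FLASH
 *	...
 *	snanddev_bbt_cleanup(nand);		// free the in-RAM cache
 */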

int sfc_nand_isbad_mtd(struct mtd_info *mtd, loff_t ofs);
int sfc_nand_erase_mtd(struct mtd_info *mtd, u32 addr);
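
/*
 * Hookup sketch (assumption, not taken from this driver's source): the isbad
 * helper matches the mtd_info->_block_isbad signature directly, while
 * sfc_nand_erase_mtd() takes a raw u32 address and would need a small wrapper
 * around struct erase_info before it could back mtd_info->_erase.
 *
 *	mtd->_block_isbad = sfc_nand_isbad_mtd;
 */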

#endif /* _SFC_NAND_MTD_H */